summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSebastian Huber <sebastian.huber@embedded-brains.de>2021-12-09 16:25:45 +0100
committerSebastian Huber <sebastian.huber@embedded-brains.de>2023-08-10 13:39:25 +0200
commitbaba23a7a1e7dc430d24463f58792f4e4607086c (patch)
treee9dd7ae2c2b60ef3d16be96fd394182503ff13d7
parentb7238ded3ffd4581ac940f8d8327a8fbfef90556 (diff)
validation: Test memory allocation
-rw-r--r--spec/build/testsuites/validation/validation-0.yml3
-rw-r--r--testsuites/validation/tc-mem-posix-memalign.c608
-rw-r--r--testsuites/validation/tc-mem-rtems-calloc.c493
-rw-r--r--testsuites/validation/tc-mem-rtems-malloc.c399
4 files changed, 1503 insertions, 0 deletions
diff --git a/spec/build/testsuites/validation/validation-0.yml b/spec/build/testsuites/validation/validation-0.yml
index 20a0432231..f2caba8d56 100644
--- a/spec/build/testsuites/validation/validation-0.yml
+++ b/spec/build/testsuites/validation/validation-0.yml
@@ -13,6 +13,9 @@ links: []
source:
- testsuites/validation/tc-acfg-appl-needs-clock-driver.c
- testsuites/validation/tc-event-send-receive.c
+- testsuites/validation/tc-mem-rtems-calloc.c
+- testsuites/validation/tc-mem-rtems-malloc.c
+- testsuites/validation/tc-mem-posix-memalign.c
- testsuites/validation/tc-thread-idle-body-no-return.c
- testsuites/validation/tr-event-send-receive.c
- testsuites/validation/ts-validation-0.c
diff --git a/testsuites/validation/tc-mem-posix-memalign.c b/testsuites/validation/tc-mem-posix-memalign.c
new file mode 100644
index 0000000000..2c87abcce2
--- /dev/null
+++ b/testsuites/validation/tc-mem-posix-memalign.c
@@ -0,0 +1,608 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup CReqPosixMemalign
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <errno.h>
+#include <stdlib.h>
+
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup CReqPosixMemalign spec:/c/req/posix-memalign
+ *
+ * @ingroup TestsuitesValidation0
+ *
+ * @{
+ */
+
+typedef enum {
+ CReqPosixMemalign_Pre_Memptr_Valid,
+ CReqPosixMemalign_Pre_Memptr_Null,
+ CReqPosixMemalign_Pre_Memptr_NA
+} CReqPosixMemalign_Pre_Memptr;
+
+typedef enum {
+ CReqPosixMemalign_Pre_Alignment_Tiny,
+ CReqPosixMemalign_Pre_Alignment_NotPower2,
+ CReqPosixMemalign_Pre_Alignment_Huge,
+ CReqPosixMemalign_Pre_Alignment_Valid,
+ CReqPosixMemalign_Pre_Alignment_NA
+} CReqPosixMemalign_Pre_Alignment;
+
+typedef enum {
+ CReqPosixMemalign_Pre_Size_Huge,
+ CReqPosixMemalign_Pre_Size_Zero,
+ CReqPosixMemalign_Pre_Size_Valid,
+ CReqPosixMemalign_Pre_Size_NA
+} CReqPosixMemalign_Pre_Size;
+
+typedef enum {
+ CReqPosixMemalign_Post_Status_Zero,
+ CReqPosixMemalign_Post_Status_EINVAL,
+ CReqPosixMemalign_Post_Status_ENOMEM,
+ CReqPosixMemalign_Post_Status_NA
+} CReqPosixMemalign_Post_Status;
+
+typedef enum {
+ CReqPosixMemalign_Post_MemptrVar_AreaBegin,
+ CReqPosixMemalign_Post_MemptrVar_Null,
+ CReqPosixMemalign_Post_MemptrVar_Nop,
+ CReqPosixMemalign_Post_MemptrVar_NA
+} CReqPosixMemalign_Post_MemptrVar;
+
+typedef enum {
+ CReqPosixMemalign_Post_Alignment_Valid,
+ CReqPosixMemalign_Post_Alignment_NA
+} CReqPosixMemalign_Post_Alignment;
+
+typedef enum {
+ CReqPosixMemalign_Post_Size_Valid,
+ CReqPosixMemalign_Post_Size_NA
+} CReqPosixMemalign_Post_Size;
+
+typedef struct {
+ uint16_t Skip : 1;
+ uint16_t Pre_Memptr_NA : 1;
+ uint16_t Pre_Alignment_NA : 1;
+ uint16_t Pre_Size_NA : 1;
+ uint16_t Post_Status : 2;
+ uint16_t Post_MemptrVar : 2;
+ uint16_t Post_Alignment : 1;
+ uint16_t Post_Size : 1;
+} CReqPosixMemalign_Entry;
+
+/**
+ * @brief Test context for spec:/c/req/posix-memalign test case.
+ */
+typedef struct {
+ /**
+ * @brief This member provides a memory support context.
+ */
+ MemoryContext mem_ctx;
+
+ /**
+ * @brief This member provides the object referenced by the memptr parameter.
+ */
+ void *memptr_obj;
+
+ /**
+ * @brief This member contains the return value of the directive call.
+ */
+ int status;
+
+ /**
+ * @brief This member specifies the memptr parameter value.
+ */
+ void **memptr;
+
+ /**
+ * @brief This member specifies the alignment parameter value.
+ */
+ size_t alignment;
+
+ /**
+ * @brief This member specifies the size parameter value.
+ */
+ size_t size;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 3 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ CReqPosixMemalign_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} CReqPosixMemalign_Context;
+
+static CReqPosixMemalign_Context
+ CReqPosixMemalign_Instance;
+
+static const char * const CReqPosixMemalign_PreDesc_Memptr[] = {
+ "Valid",
+ "Null",
+ "NA"
+};
+
+static const char * const CReqPosixMemalign_PreDesc_Alignment[] = {
+ "Tiny",
+ "NotPower2",
+ "Huge",
+ "Valid",
+ "NA"
+};
+
+static const char * const CReqPosixMemalign_PreDesc_Size[] = {
+ "Huge",
+ "Zero",
+ "Valid",
+ "NA"
+};
+
+static const char * const * const CReqPosixMemalign_PreDesc[] = {
+ CReqPosixMemalign_PreDesc_Memptr,
+ CReqPosixMemalign_PreDesc_Alignment,
+ CReqPosixMemalign_PreDesc_Size,
+ NULL
+};
+
+static void CReqPosixMemalign_Pre_Memptr_Prepare(
+ CReqPosixMemalign_Context *ctx,
+ CReqPosixMemalign_Pre_Memptr state
+)
+{
+ switch ( state ) {
+ case CReqPosixMemalign_Pre_Memptr_Valid: {
+ /*
+ * While the memptr parameter references an object of type ``void *``.
+ */
+ ctx->memptr = &ctx->memptr_obj;
+ break;
+ }
+
+ case CReqPosixMemalign_Pre_Memptr_Null: {
+ /*
+ * While the memptr parameter is equal to NULL.
+ */
+ ctx->memptr = NULL;
+ break;
+ }
+
+ case CReqPosixMemalign_Pre_Memptr_NA:
+ break;
+ }
+}
+
+static void CReqPosixMemalign_Pre_Alignment_Prepare(
+ CReqPosixMemalign_Context *ctx,
+ CReqPosixMemalign_Pre_Alignment state
+)
+{
+ switch ( state ) {
+ case CReqPosixMemalign_Pre_Alignment_Tiny: {
+ /*
+ * While the alignment parameter is less than sizeof( void * ).
+ */
+ ctx->alignment = sizeof( void * ) - 1;
+ break;
+ }
+
+ case CReqPosixMemalign_Pre_Alignment_NotPower2: {
+ /*
+ * While the alignment parameter is greater than or equal to sizeof( void
+ * * ), while the alignment parameter is not a power of two.
+ */
+ ctx->alignment = sizeof( void * ) + 1;
+ break;
+ }
+
+ case CReqPosixMemalign_Pre_Alignment_Huge: {
+ /*
+ * While the alignment parameter is greater than or equal to sizeof( void
+ * * ), while the alignment parameter is a power of two, while the
+ * alignment parameter is too large to allocate a memory area with the
+ * specified alignment.
+ */
+ ctx->alignment = SIZE_MAX / 2 + 1;
+ break;
+ }
+
+ case CReqPosixMemalign_Pre_Alignment_Valid: {
+ /*
+ * While the alignment parameter is greater than or equal to sizeof( void
+ * * ), while the alignment parameter is a power of two, while the
+ * alignment parameter is small enough to allocate a memory area with the
+ * specified alignment.
+ */
+ ctx->alignment = 128;
+ break;
+ }
+
+ case CReqPosixMemalign_Pre_Alignment_NA:
+ break;
+ }
+}
+
+static void CReqPosixMemalign_Pre_Size_Prepare(
+ CReqPosixMemalign_Context *ctx,
+ CReqPosixMemalign_Pre_Size state
+)
+{
+ switch ( state ) {
+ case CReqPosixMemalign_Pre_Size_Huge: {
+ /*
+ * While the size parameter is not equal to zero, while the size
+ * parameter is too large to allocate a memory area with the specified
+ * size.
+ */
+ ctx->size = SIZE_MAX;
+ break;
+ }
+
+ case CReqPosixMemalign_Pre_Size_Zero: {
+ /*
+ * While the size parameter is equal to zero.
+ */
+ ctx->size = 0;
+ break;
+ }
+
+ case CReqPosixMemalign_Pre_Size_Valid: {
+ /*
+ * While the size parameter is not equal to zero, while the size
+ * parameter is small enough to allocate a memory area with the specified
+ * size.
+ */
+ ctx->size = sizeof( uint64_t );
+ break;
+ }
+
+ case CReqPosixMemalign_Pre_Size_NA:
+ break;
+ }
+}
+
+static void CReqPosixMemalign_Post_Status_Check(
+ CReqPosixMemalign_Context *ctx,
+ CReqPosixMemalign_Post_Status state
+)
+{
+ switch ( state ) {
+ case CReqPosixMemalign_Post_Status_Zero: {
+ /*
+ * The return value of posix_memalign() shall be equal to zero.
+ */
+ T_eq_int( ctx->status, 0 );
+ break;
+ }
+
+ case CReqPosixMemalign_Post_Status_EINVAL: {
+ /*
+ * The return value of posix_memalign() shall be equal to EINVAL.
+ */
+ T_eq_int( ctx->status, EINVAL );
+ break;
+ }
+
+ case CReqPosixMemalign_Post_Status_ENOMEM: {
+ /*
+ * The return value of posix_memalign() shall be equal to ENOMEM.
+ */
+ T_eq_int( ctx->status, ENOMEM );
+ break;
+ }
+
+ case CReqPosixMemalign_Post_Status_NA:
+ break;
+ }
+}
+
+static void CReqPosixMemalign_Post_MemptrVar_Check(
+ CReqPosixMemalign_Context *ctx,
+ CReqPosixMemalign_Post_MemptrVar state
+)
+{
+ switch ( state ) {
+ case CReqPosixMemalign_Post_MemptrVar_AreaBegin: {
+ /*
+ * The value of the object referenced by the memptr parameter shall be
+ * set to the begin address of the allocated memory area after the return
+ * of the posix_memalign() call.
+ */
+ T_eq_ptr( ctx->memptr, &ctx->memptr_obj );
+ T_not_null( ctx->memptr_obj );
+ break;
+ }
+
+ case CReqPosixMemalign_Post_MemptrVar_Null: {
+ /*
+ * The value of the object referenced by the memptr parameter shall be
+ * set to NULL after the return of the posix_memalign() call.
+ */
+ T_eq_ptr( ctx->memptr, &ctx->memptr_obj );
+ T_null( ctx->memptr_obj );
+ break;
+ }
+
+ case CReqPosixMemalign_Post_MemptrVar_Nop: {
+ /*
+ * Objects referenced by the memptr parameter in past calls to
+ * posix_memalign() shall not be accessed by the posix_memalign() call.
+ */
+ T_eq_uptr( (uintptr_t) ctx->memptr_obj, 1 );
+ break;
+ }
+
+ case CReqPosixMemalign_Post_MemptrVar_NA:
+ break;
+ }
+}
+
+static void CReqPosixMemalign_Post_Alignment_Check(
+ CReqPosixMemalign_Context *ctx,
+ CReqPosixMemalign_Post_Alignment state
+)
+{
+ switch ( state ) {
+ case CReqPosixMemalign_Post_Alignment_Valid: {
+ /*
+ * The begin address of the allocated memory area shall be an integral
+ * multiple of the alignment parameter.
+ */
+ T_eq_uptr( (uintptr_t) ctx->memptr_obj % 128, 0 );
+ break;
+ }
+
+ case CReqPosixMemalign_Post_Alignment_NA:
+ break;
+ }
+}
+
+static void CReqPosixMemalign_Post_Size_Check(
+ CReqPosixMemalign_Context *ctx,
+ CReqPosixMemalign_Post_Size state
+)
+{
+ void *ptr;
+ int eno;
+ uintptr_t a;
+ uintptr_t b;
+ uintptr_t size;
+
+ switch ( state ) {
+ case CReqPosixMemalign_Post_Size_Valid: {
+ /*
+ * The size of the allocated memory area shall be greater than or equal to
+ * the size parameter.
+ */
+ /* Assume that the next allocation is done from adjacent memory */
+ ptr = ctx->memptr_obj;
+ eno = posix_memalign( &ptr, ctx->alignment, ctx->size );
+ T_eq_int( eno, 0 );
+ T_not_null( ptr );
+ a = (uintptr_t) ptr;
+ b = (uintptr_t) ctx->memptr_obj;
+ size = a < b ? b - a : a - b;
+ T_ge_uptr( size, ctx->size );
+ break;
+ }
+
+ case CReqPosixMemalign_Post_Size_NA:
+ break;
+ }
+}
+
+static void CReqPosixMemalign_Setup( CReqPosixMemalign_Context *ctx )
+{
+ MemorySave( &ctx->mem_ctx );
+}
+
+static void CReqPosixMemalign_Setup_Wrap( void *arg )
+{
+ CReqPosixMemalign_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ CReqPosixMemalign_Setup( ctx );
+}
+
+static void CReqPosixMemalign_Teardown( CReqPosixMemalign_Context *ctx )
+{
+ MemoryRestore( &ctx->mem_ctx );
+}
+
+static void CReqPosixMemalign_Teardown_Wrap( void *arg )
+{
+ CReqPosixMemalign_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ CReqPosixMemalign_Teardown( ctx );
+}
+
+static void CReqPosixMemalign_Prepare( CReqPosixMemalign_Context *ctx )
+{
+ ctx->memptr_obj = (void *)(uintptr_t) 1;
+}
+
+static void CReqPosixMemalign_Action( CReqPosixMemalign_Context *ctx )
+{
+ ctx->status = posix_memalign( ctx->memptr, ctx->alignment, ctx->size );
+}
+
+static const CReqPosixMemalign_Entry
+CReqPosixMemalign_Entries[] = {
+ { 0, 0, 0, 0, CReqPosixMemalign_Post_Status_EINVAL,
+ CReqPosixMemalign_Post_MemptrVar_Nop, CReqPosixMemalign_Post_Alignment_NA,
+ CReqPosixMemalign_Post_Size_NA },
+ { 0, 0, 0, 0, CReqPosixMemalign_Post_Status_EINVAL,
+ CReqPosixMemalign_Post_MemptrVar_Null, CReqPosixMemalign_Post_Alignment_NA,
+ CReqPosixMemalign_Post_Size_NA },
+ { 0, 0, 0, 0, CReqPosixMemalign_Post_Status_ENOMEM,
+ CReqPosixMemalign_Post_MemptrVar_Null, CReqPosixMemalign_Post_Alignment_NA,
+ CReqPosixMemalign_Post_Size_NA },
+ { 0, 0, 0, 0, CReqPosixMemalign_Post_Status_Zero,
+ CReqPosixMemalign_Post_MemptrVar_Null,
+ CReqPosixMemalign_Post_Alignment_Valid, CReqPosixMemalign_Post_Size_NA },
+ { 0, 0, 0, 0, CReqPosixMemalign_Post_Status_Zero,
+ CReqPosixMemalign_Post_MemptrVar_AreaBegin,
+ CReqPosixMemalign_Post_Alignment_Valid, CReqPosixMemalign_Post_Size_Valid }
+};
+
+static const uint8_t
+CReqPosixMemalign_Map[] = {
+ 1, 1, 1, 1, 1, 1, 2, 3, 2, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static size_t CReqPosixMemalign_Scope( void *arg, char *buf, size_t n )
+{
+ CReqPosixMemalign_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope( CReqPosixMemalign_PreDesc, buf, n, ctx->Map.pcs );
+ }
+
+ return 0;
+}
+
+static T_fixture CReqPosixMemalign_Fixture = {
+ .setup = CReqPosixMemalign_Setup_Wrap,
+ .stop = NULL,
+ .teardown = CReqPosixMemalign_Teardown_Wrap,
+ .scope = CReqPosixMemalign_Scope,
+ .initial_context = &CReqPosixMemalign_Instance
+};
+
+static inline CReqPosixMemalign_Entry CReqPosixMemalign_PopEntry(
+ CReqPosixMemalign_Context *ctx
+)
+{
+ size_t index;
+
+ index = ctx->Map.index;
+ ctx->Map.index = index + 1;
+ return CReqPosixMemalign_Entries[
+ CReqPosixMemalign_Map[ index ]
+ ];
+}
+
+static void CReqPosixMemalign_TestVariant( CReqPosixMemalign_Context *ctx )
+{
+ CReqPosixMemalign_Pre_Memptr_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+ CReqPosixMemalign_Pre_Alignment_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+ CReqPosixMemalign_Pre_Size_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+ CReqPosixMemalign_Action( ctx );
+ CReqPosixMemalign_Post_Status_Check( ctx, ctx->Map.entry.Post_Status );
+ CReqPosixMemalign_Post_MemptrVar_Check( ctx, ctx->Map.entry.Post_MemptrVar );
+ CReqPosixMemalign_Post_Alignment_Check( ctx, ctx->Map.entry.Post_Alignment );
+ CReqPosixMemalign_Post_Size_Check( ctx, ctx->Map.entry.Post_Size );
+}
+
+/**
+ * @fn void T_case_body_CReqPosixMemalign( void )
+ */
+T_TEST_CASE_FIXTURE( CReqPosixMemalign, &CReqPosixMemalign_Fixture )
+{
+ CReqPosixMemalign_Context *ctx;
+
+ ctx = T_fixture_context();
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+
+ for (
+ ctx->Map.pcs[ 0 ] = CReqPosixMemalign_Pre_Memptr_Valid;
+ ctx->Map.pcs[ 0 ] < CReqPosixMemalign_Pre_Memptr_NA;
+ ++ctx->Map.pcs[ 0 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 1 ] = CReqPosixMemalign_Pre_Alignment_Tiny;
+ ctx->Map.pcs[ 1 ] < CReqPosixMemalign_Pre_Alignment_NA;
+ ++ctx->Map.pcs[ 1 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 2 ] = CReqPosixMemalign_Pre_Size_Huge;
+ ctx->Map.pcs[ 2 ] < CReqPosixMemalign_Pre_Size_NA;
+ ++ctx->Map.pcs[ 2 ]
+ ) {
+ ctx->Map.entry = CReqPosixMemalign_PopEntry( ctx );
+ CReqPosixMemalign_Prepare( ctx );
+ CReqPosixMemalign_TestVariant( ctx );
+ }
+ }
+ }
+}
+
+/** @} */
diff --git a/testsuites/validation/tc-mem-rtems-calloc.c b/testsuites/validation/tc-mem-rtems-calloc.c
new file mode 100644
index 0000000000..18802d0478
--- /dev/null
+++ b/testsuites/validation/tc-mem-rtems-calloc.c
@@ -0,0 +1,493 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RtemsMallocReqCalloc
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/malloc.h>
+
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RtemsMallocReqCalloc spec:/rtems/malloc/req/calloc
+ *
+ * @ingroup TestsuitesValidation0
+ *
+ * @{
+ */
+
+typedef enum {
+ RtemsMallocReqCalloc_Pre_ElementCount_Huge,
+ RtemsMallocReqCalloc_Pre_ElementCount_Zero,
+ RtemsMallocReqCalloc_Pre_ElementCount_Valid,
+ RtemsMallocReqCalloc_Pre_ElementCount_NA
+} RtemsMallocReqCalloc_Pre_ElementCount;
+
+typedef enum {
+ RtemsMallocReqCalloc_Pre_ElementSize_Huge,
+ RtemsMallocReqCalloc_Pre_ElementSize_Zero,
+ RtemsMallocReqCalloc_Pre_ElementSize_Valid,
+ RtemsMallocReqCalloc_Pre_ElementSize_NA
+} RtemsMallocReqCalloc_Pre_ElementSize;
+
+typedef enum {
+ RtemsMallocReqCalloc_Post_Status_Null,
+ RtemsMallocReqCalloc_Post_Status_AreaBegin,
+ RtemsMallocReqCalloc_Post_Status_NA
+} RtemsMallocReqCalloc_Post_Status;
+
+typedef enum {
+ RtemsMallocReqCalloc_Post_Alignment_Valid,
+ RtemsMallocReqCalloc_Post_Alignment_NA
+} RtemsMallocReqCalloc_Post_Alignment;
+
+typedef enum {
+ RtemsMallocReqCalloc_Post_Size_Valid,
+ RtemsMallocReqCalloc_Post_Size_NA
+} RtemsMallocReqCalloc_Post_Size;
+
+typedef enum {
+ RtemsMallocReqCalloc_Post_Content_Zero,
+ RtemsMallocReqCalloc_Post_Content_NA
+} RtemsMallocReqCalloc_Post_Content;
+
+typedef struct {
+ uint8_t Skip : 1;
+ uint8_t Pre_ElementCount_NA : 1;
+ uint8_t Pre_ElementSize_NA : 1;
+ uint8_t Post_Status : 2;
+ uint8_t Post_Alignment : 1;
+ uint8_t Post_Size : 1;
+ uint8_t Post_Content : 1;
+} RtemsMallocReqCalloc_Entry;
+
+/**
+ * @brief Test context for spec:/rtems/malloc/req/calloc test case.
+ */
+typedef struct {
+ /**
+ * @brief This member provides a memory support context.
+ */
+ MemoryContext mem_ctx;
+
+ /**
+ * @brief This member contains the return value of the rtems_calloc() call.
+ */
+ void *ptr;
+
+ /**
+ * @brief This member specifies the ``nelem`` parameter value.
+ */
+ size_t nelem;
+
+ /**
+ * @brief This member specifies the ``elsize`` parameter value.
+ */
+ size_t elsize;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 2 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ RtemsMallocReqCalloc_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} RtemsMallocReqCalloc_Context;
+
+static RtemsMallocReqCalloc_Context
+ RtemsMallocReqCalloc_Instance;
+
+static const char * const RtemsMallocReqCalloc_PreDesc_ElementCount[] = {
+ "Huge",
+ "Zero",
+ "Valid",
+ "NA"
+};
+
+static const char * const RtemsMallocReqCalloc_PreDesc_ElementSize[] = {
+ "Huge",
+ "Zero",
+ "Valid",
+ "NA"
+};
+
+static const char * const * const RtemsMallocReqCalloc_PreDesc[] = {
+ RtemsMallocReqCalloc_PreDesc_ElementCount,
+ RtemsMallocReqCalloc_PreDesc_ElementSize,
+ NULL
+};
+
+static void RtemsMallocReqCalloc_Pre_ElementCount_Prepare(
+ RtemsMallocReqCalloc_Context *ctx,
+ RtemsMallocReqCalloc_Pre_ElementCount state
+)
+{
+ switch ( state ) {
+ case RtemsMallocReqCalloc_Pre_ElementCount_Huge: {
+ /*
+ * While the ``nelem`` parameter is not equal to zero, while the
+ * ``nelem`` parameter is too large to allocate a memory area with the
+ * specified size.
+ */
+ ctx->nelem = SIZE_MAX;
+ break;
+ }
+
+ case RtemsMallocReqCalloc_Pre_ElementCount_Zero: {
+ /*
+ * While the ``nelem`` parameter is equal to zero.
+ */
+ ctx->nelem = 0;
+ break;
+ }
+
+ case RtemsMallocReqCalloc_Pre_ElementCount_Valid: {
+ /*
+ * While the ``nelem`` parameter is not equal to zero, while the
+ * ``nelem`` parameter is small enough to allocate a memory area with the
+ * specified size.
+ */
+ ctx->nelem = 1;
+ break;
+ }
+
+ case RtemsMallocReqCalloc_Pre_ElementCount_NA:
+ break;
+ }
+}
+
+static void RtemsMallocReqCalloc_Pre_ElementSize_Prepare(
+ RtemsMallocReqCalloc_Context *ctx,
+ RtemsMallocReqCalloc_Pre_ElementSize state
+)
+{
+ switch ( state ) {
+ case RtemsMallocReqCalloc_Pre_ElementSize_Huge: {
+ /*
+ * While the ``elsize`` parameter is not equal to zero, while the
+ * ``elsize`` parameter is too large to allocate a memory area with the
+ * specified size.
+ */
+ ctx->elsize = SIZE_MAX;
+ break;
+ }
+
+ case RtemsMallocReqCalloc_Pre_ElementSize_Zero: {
+ /*
+ * While the ``elsize`` parameter is equal to zero.
+ */
+ ctx->elsize = 0;
+ break;
+ }
+
+ case RtemsMallocReqCalloc_Pre_ElementSize_Valid: {
+ /*
+ * While the ``elsize`` parameter is not equal to zero, while the
+ * ``elsize`` parameter is small enough to allocate a memory area with
+ * the specified size.
+ */
+ ctx->elsize = sizeof( uint64_t );
+ break;
+ }
+
+ case RtemsMallocReqCalloc_Pre_ElementSize_NA:
+ break;
+ }
+}
+
+static void RtemsMallocReqCalloc_Post_Status_Check(
+ RtemsMallocReqCalloc_Context *ctx,
+ RtemsMallocReqCalloc_Post_Status state
+)
+{
+ switch ( state ) {
+ case RtemsMallocReqCalloc_Post_Status_Null: {
+ /*
+ * The return value of rtems_calloc() shall be equal to NULL.
+ */
+ T_null( ctx->ptr );
+ break;
+ }
+
+ case RtemsMallocReqCalloc_Post_Status_AreaBegin: {
+ /*
+ * The return value of rtems_calloc() shall be equal to the begin address
+ * of the allocated memory area.
+ */
+ T_not_null( ctx->ptr );
+ break;
+ }
+
+ case RtemsMallocReqCalloc_Post_Status_NA:
+ break;
+ }
+}
+
+static void RtemsMallocReqCalloc_Post_Alignment_Check(
+ RtemsMallocReqCalloc_Context *ctx,
+ RtemsMallocReqCalloc_Post_Alignment state
+)
+{
+ switch ( state ) {
+ case RtemsMallocReqCalloc_Post_Alignment_Valid: {
+ /*
+ * The begin address of the allocated memory area shall be an integral
+ * multiple of the heap alignment of the target architecture.
+ */
+ T_eq_uptr( (uintptr_t) ctx->ptr % CPU_HEAP_ALIGNMENT, 0 );
+ break;
+ }
+
+ case RtemsMallocReqCalloc_Post_Alignment_NA:
+ break;
+ }
+}
+
+static void RtemsMallocReqCalloc_Post_Size_Check(
+ RtemsMallocReqCalloc_Context *ctx,
+ RtemsMallocReqCalloc_Post_Size state
+)
+{
+ void *ptr;
+ uintptr_t a;
+ uintptr_t b;
+ uintptr_t size;
+
+ switch ( state ) {
+ case RtemsMallocReqCalloc_Post_Size_Valid: {
+ /*
+ * The size of the allocated memory area shall be greater than or equal to
+ * the product of the ``nelem`` and ``elsize`` parameters.
+ */
+ /* Assume that the next allocation is done from adjacent memory */
+ ptr = ctx->ptr;
+ ctx->ptr = rtems_calloc( ctx->nelem, ctx->elsize );
+ T_not_null( ptr );
+ a = (uintptr_t) ptr;
+ b = (uintptr_t) ctx->ptr;
+ size = a < b ? b - a : a - b;
+ T_ge_uptr( size, ctx->nelem * ctx->elsize );
+ break;
+ }
+
+ case RtemsMallocReqCalloc_Post_Size_NA:
+ break;
+ }
+}
+
+static void RtemsMallocReqCalloc_Post_Content_Check(
+ RtemsMallocReqCalloc_Context *ctx,
+ RtemsMallocReqCalloc_Post_Content state
+)
+{
+ switch ( state ) {
+ case RtemsMallocReqCalloc_Post_Content_Zero: {
+ /*
+ * The content of the allocated memory area shall be cleared to zero.
+ */
+ T_eq_u64( *(uint64_t *) ctx->ptr, 0 );
+ break;
+ }
+
+ case RtemsMallocReqCalloc_Post_Content_NA:
+ break;
+ }
+}
+
+static void RtemsMallocReqCalloc_Setup( RtemsMallocReqCalloc_Context *ctx )
+{
+ MemorySave( &ctx->mem_ctx );
+}
+
+static void RtemsMallocReqCalloc_Setup_Wrap( void *arg )
+{
+ RtemsMallocReqCalloc_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsMallocReqCalloc_Setup( ctx );
+}
+
+static void RtemsMallocReqCalloc_Teardown( RtemsMallocReqCalloc_Context *ctx )
+{
+ MemoryRestore( &ctx->mem_ctx );
+}
+
+static void RtemsMallocReqCalloc_Teardown_Wrap( void *arg )
+{
+ RtemsMallocReqCalloc_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsMallocReqCalloc_Teardown( ctx );
+}
+
+static void RtemsMallocReqCalloc_Action( RtemsMallocReqCalloc_Context *ctx )
+{
+ ctx->ptr = rtems_calloc( ctx->nelem, ctx->elsize );
+}
+
+static const RtemsMallocReqCalloc_Entry
+RtemsMallocReqCalloc_Entries[] = {
+ { 0, 0, 0, RtemsMallocReqCalloc_Post_Status_Null,
+ RtemsMallocReqCalloc_Post_Alignment_Valid,
+ RtemsMallocReqCalloc_Post_Size_NA, RtemsMallocReqCalloc_Post_Content_NA },
+ { 0, 0, 0, RtemsMallocReqCalloc_Post_Status_AreaBegin,
+ RtemsMallocReqCalloc_Post_Alignment_Valid,
+ RtemsMallocReqCalloc_Post_Size_Valid,
+ RtemsMallocReqCalloc_Post_Content_Zero }
+};
+
+static const uint8_t
+RtemsMallocReqCalloc_Map[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 1
+};
+
+static size_t RtemsMallocReqCalloc_Scope( void *arg, char *buf, size_t n )
+{
+ RtemsMallocReqCalloc_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope( RtemsMallocReqCalloc_PreDesc, buf, n, ctx->Map.pcs );
+ }
+
+ return 0;
+}
+
+static T_fixture RtemsMallocReqCalloc_Fixture = {
+ .setup = RtemsMallocReqCalloc_Setup_Wrap,
+ .stop = NULL,
+ .teardown = RtemsMallocReqCalloc_Teardown_Wrap,
+ .scope = RtemsMallocReqCalloc_Scope,
+ .initial_context = &RtemsMallocReqCalloc_Instance
+};
+
+static inline RtemsMallocReqCalloc_Entry RtemsMallocReqCalloc_PopEntry(
+ RtemsMallocReqCalloc_Context *ctx
+)
+{
+ size_t index;
+
+ index = ctx->Map.index;
+ ctx->Map.index = index + 1;
+ return RtemsMallocReqCalloc_Entries[
+ RtemsMallocReqCalloc_Map[ index ]
+ ];
+}
+
+static void RtemsMallocReqCalloc_TestVariant(
+ RtemsMallocReqCalloc_Context *ctx
+)
+{
+ RtemsMallocReqCalloc_Pre_ElementCount_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+ RtemsMallocReqCalloc_Pre_ElementSize_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+ RtemsMallocReqCalloc_Action( ctx );
+ RtemsMallocReqCalloc_Post_Status_Check( ctx, ctx->Map.entry.Post_Status );
+ RtemsMallocReqCalloc_Post_Alignment_Check(
+ ctx,
+ ctx->Map.entry.Post_Alignment
+ );
+ RtemsMallocReqCalloc_Post_Size_Check( ctx, ctx->Map.entry.Post_Size );
+ RtemsMallocReqCalloc_Post_Content_Check( ctx, ctx->Map.entry.Post_Content );
+}
+
+/**
+ * @fn void T_case_body_RtemsMallocReqCalloc( void )
+ */
+T_TEST_CASE_FIXTURE( RtemsMallocReqCalloc, &RtemsMallocReqCalloc_Fixture )
+{
+ RtemsMallocReqCalloc_Context *ctx;
+
+ ctx = T_fixture_context();
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+
+ for (
+ ctx->Map.pcs[ 0 ] = RtemsMallocReqCalloc_Pre_ElementCount_Huge;
+ ctx->Map.pcs[ 0 ] < RtemsMallocReqCalloc_Pre_ElementCount_NA;
+ ++ctx->Map.pcs[ 0 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 1 ] = RtemsMallocReqCalloc_Pre_ElementSize_Huge;
+ ctx->Map.pcs[ 1 ] < RtemsMallocReqCalloc_Pre_ElementSize_NA;
+ ++ctx->Map.pcs[ 1 ]
+ ) {
+ ctx->Map.entry = RtemsMallocReqCalloc_PopEntry( ctx );
+ RtemsMallocReqCalloc_TestVariant( ctx );
+ }
+ }
+}
+
+/** @} */
diff --git a/testsuites/validation/tc-mem-rtems-malloc.c b/testsuites/validation/tc-mem-rtems-malloc.c
new file mode 100644
index 0000000000..c4ba0bc170
--- /dev/null
+++ b/testsuites/validation/tc-mem-rtems-malloc.c
@@ -0,0 +1,399 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RtemsMallocReqMalloc
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/malloc.h>
+
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RtemsMallocReqMalloc spec:/rtems/malloc/req/malloc
+ *
+ * @ingroup TestsuitesValidation0
+ *
+ * @{
+ */
+
/* Pre-condition states of the ``size`` parameter passed to rtems_malloc() */
typedef enum {
  RtemsMallocReqMalloc_Pre_Size_Huge, /* non-zero, too large to allocate */
  RtemsMallocReqMalloc_Pre_Size_Zero, /* exactly zero */
  RtemsMallocReqMalloc_Pre_Size_Valid, /* non-zero, small enough to allocate */
  RtemsMallocReqMalloc_Pre_Size_NA /* sentinel: number of real states */
} RtemsMallocReqMalloc_Pre_Size;
+
/* Post-condition states of the rtems_malloc() return value */
typedef enum {
  RtemsMallocReqMalloc_Post_Status_Null, /* return value is NULL */
  RtemsMallocReqMalloc_Post_Status_AreaBegin, /* return value is area begin */
  RtemsMallocReqMalloc_Post_Status_NA /* not applicable */
} RtemsMallocReqMalloc_Post_Status;
+
/* Post-condition states of the allocated area's alignment */
typedef enum {
  RtemsMallocReqMalloc_Post_Alignment_Valid, /* aligned to CPU_HEAP_ALIGNMENT */
  RtemsMallocReqMalloc_Post_Alignment_NA /* not applicable */
} RtemsMallocReqMalloc_Post_Alignment;
+
/* Post-condition states of the allocated area's size */
typedef enum {
  RtemsMallocReqMalloc_Post_Size_Valid, /* at least the requested size */
  RtemsMallocReqMalloc_Post_Size_NA /* not applicable */
} RtemsMallocReqMalloc_Post_Size;
+
/*
 * One transition map entry, bitfield-packed: a skip flag, an N/A flag for the
 * size pre-condition, and the expected post-condition states (enum values
 * stored in just enough bits to hold them).
 */
typedef struct {
  uint8_t Skip : 1; /* if set, skip this transition variant */
  uint8_t Pre_Size_NA : 1; /* if set, the size pre-condition is N/A */
  uint8_t Post_Status : 2; /* RtemsMallocReqMalloc_Post_Status value */
  uint8_t Post_Alignment : 1; /* RtemsMallocReqMalloc_Post_Alignment value */
  uint8_t Post_Size : 1; /* RtemsMallocReqMalloc_Post_Size value */
} RtemsMallocReqMalloc_Entry;
+
/**
 * @brief Test context for spec:/rtems/malloc/req/malloc test case.
 */
typedef struct {
  /**
   * @brief This member provides a memory support context.
   */
  MemoryContext mem_ctx;

  /**
   * @brief This member contains the return value of the rtems_malloc() call.
   */
  void *ptr;

  /**
   * @brief This member specifies the ``size`` parameter value.
   */
  size_t size;

  struct {
    /**
     * @brief This member defines the pre-condition states for the next action.
     */
    size_t pcs[ 1 ];

    /**
     * @brief If this member is true, then the test action loop is executed.
     */
    bool in_action_loop;

    /**
     * @brief This member contains the next transition map index.
     */
    size_t index;

    /**
     * @brief This member contains the current transition map entry.
     */
    RtemsMallocReqMalloc_Entry entry;

    /**
     * @brief If this member is true, then the current transition variant
     *   should be skipped.
     */
    bool skip;
  } Map;
} RtemsMallocReqMalloc_Context;
+
/* Test context instance handed to the fixture as its initial context */
static RtemsMallocReqMalloc_Context
  RtemsMallocReqMalloc_Instance;

/* Human-readable names of the ``size`` pre-condition states, used by Scope */
static const char * const RtemsMallocReqMalloc_PreDesc_Size[] = {
  "Huge",
  "Zero",
  "Valid",
  "NA"
};

/* NULL-terminated list of pre-condition descriptor tables for T_get_scope() */
static const char * const * const RtemsMallocReqMalloc_PreDesc[] = {
  RtemsMallocReqMalloc_PreDesc_Size,
  NULL
};
+
+static void RtemsMallocReqMalloc_Pre_Size_Prepare(
+ RtemsMallocReqMalloc_Context *ctx,
+ RtemsMallocReqMalloc_Pre_Size state
+)
+{
+ switch ( state ) {
+ case RtemsMallocReqMalloc_Pre_Size_Huge: {
+ /*
+ * While the ``size`` parameter is not equal to zero, while the ``size``
+ * parameter is too large to allocate a memory area with the specified
+ * size.
+ */
+ ctx->size = SIZE_MAX;
+ break;
+ }
+
+ case RtemsMallocReqMalloc_Pre_Size_Zero: {
+ /*
+ * While the ``size`` parameter is equal to zero.
+ */
+ ctx->size = 0;
+ break;
+ }
+
+ case RtemsMallocReqMalloc_Pre_Size_Valid: {
+ /*
+ * While the ``size`` parameter is not equal to zero, while the ``size``
+ * parameter is small enough to allocate a memory area with the specified
+ * size.
+ */
+ ctx->size = 1;
+ break;
+ }
+
+ case RtemsMallocReqMalloc_Pre_Size_NA:
+ break;
+ }
+}
+
+static void RtemsMallocReqMalloc_Post_Status_Check(
+ RtemsMallocReqMalloc_Context *ctx,
+ RtemsMallocReqMalloc_Post_Status state
+)
+{
+ switch ( state ) {
+ case RtemsMallocReqMalloc_Post_Status_Null: {
+ /*
+ * The return value of rtems_malloc() shall be equal to NULL.
+ */
+ T_null( ctx->ptr );
+ break;
+ }
+
+ case RtemsMallocReqMalloc_Post_Status_AreaBegin: {
+ /*
+ * The return value of rtems_malloc() shall be equal to the begin address
+ * of the allocated memory area.
+ */
+ T_not_null( ctx->ptr );
+ break;
+ }
+
+ case RtemsMallocReqMalloc_Post_Status_NA:
+ break;
+ }
+}
+
+static void RtemsMallocReqMalloc_Post_Alignment_Check(
+ RtemsMallocReqMalloc_Context *ctx,
+ RtemsMallocReqMalloc_Post_Alignment state
+)
+{
+ switch ( state ) {
+ case RtemsMallocReqMalloc_Post_Alignment_Valid: {
+ /*
+ * The begin address of the allocated memory area shall be an integral
+ * multiple of the heap alignment of the target architecture.
+ */
+ T_eq_uptr( (uintptr_t) ctx->ptr % CPU_HEAP_ALIGNMENT, 0 );
+ break;
+ }
+
+ case RtemsMallocReqMalloc_Post_Alignment_NA:
+ break;
+ }
+}
+
/**
 * @brief Checks the size post-condition of the allocated memory area.
 *
 * Since the heap does not expose the block size directly, the check performs
 * a second allocation of the same size and uses the address distance between
 * the two areas as a lower bound estimate of the first area's size.
 */
static void RtemsMallocReqMalloc_Post_Size_Check(
  RtemsMallocReqMalloc_Context *ctx,
  RtemsMallocReqMalloc_Post_Size state
)
{
  void *ptr;
  uintptr_t a;
  uintptr_t b;
  uintptr_t size;

  switch ( state ) {
    case RtemsMallocReqMalloc_Post_Size_Valid: {
      /*
       * The size of the allocated memory area shall be greater than or equal
       * to the ``size`` parameter.
       */
      /* Assume that the next allocation is done from adjacent memory */
      ptr = ctx->ptr;
      ctx->ptr = rtems_malloc( ctx->size );
      T_not_null( ptr );
      a = (uintptr_t) ptr;
      b = (uintptr_t) ctx->ptr;
      /* The distance works regardless of which area has the lower address */
      size = a < b ? b - a : a - b;
      T_ge_uptr( size, ctx->size );
      break;
    }

    case RtemsMallocReqMalloc_Post_Size_NA:
      break;
  }
}
+
/* Saves the memory allocator state so the teardown can restore it */
static void RtemsMallocReqMalloc_Setup( RtemsMallocReqMalloc_Context *ctx )
{
  MemorySave( &ctx->mem_ctx );
}
+
+static void RtemsMallocReqMalloc_Setup_Wrap( void *arg )
+{
+ RtemsMallocReqMalloc_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsMallocReqMalloc_Setup( ctx );
+}
+
/* Restores the memory allocator state saved by the setup */
static void RtemsMallocReqMalloc_Teardown( RtemsMallocReqMalloc_Context *ctx )
{
  MemoryRestore( &ctx->mem_ctx );
}
+
+static void RtemsMallocReqMalloc_Teardown_Wrap( void *arg )
+{
+ RtemsMallocReqMalloc_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsMallocReqMalloc_Teardown( ctx );
+}
+
/* Performs the action under test: calls rtems_malloc() with the prepared
 * size and stores the result for the post-condition checks */
static void RtemsMallocReqMalloc_Action( RtemsMallocReqMalloc_Context *ctx )
{
  ctx->ptr = rtems_malloc( ctx->size );
}
+
/*
 * Transition map entries; field order is { Skip, Pre_Size_NA, Post_Status,
 * Post_Alignment, Post_Size }.
 */
static const RtemsMallocReqMalloc_Entry
RtemsMallocReqMalloc_Entries[] = {
  /* [0] allocation fails: NULL return, size check not applicable */
  { 0, 0, RtemsMallocReqMalloc_Post_Status_Null,
    RtemsMallocReqMalloc_Post_Alignment_Valid,
    RtemsMallocReqMalloc_Post_Size_NA },
  /* [1] allocation succeeds: area begin returned, aligned, size checked */
  { 0, 0, RtemsMallocReqMalloc_Post_Status_AreaBegin,
    RtemsMallocReqMalloc_Post_Alignment_Valid,
    RtemsMallocReqMalloc_Post_Size_Valid }
};
+
/* Entry index per pre-condition state: Huge -> 0, Zero -> 0, Valid -> 1 */
static const uint8_t
RtemsMallocReqMalloc_Map[] = {
  0, 0, 1
};
+
+static size_t RtemsMallocReqMalloc_Scope( void *arg, char *buf, size_t n )
+{
+ RtemsMallocReqMalloc_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope( RtemsMallocReqMalloc_PreDesc, buf, n, ctx->Map.pcs );
+ }
+
+ return 0;
+}
+
/* Test fixture wiring the setup, teardown, and scope hooks to the context */
static T_fixture RtemsMallocReqMalloc_Fixture = {
  .setup = RtemsMallocReqMalloc_Setup_Wrap,
  .stop = NULL,
  .teardown = RtemsMallocReqMalloc_Teardown_Wrap,
  .scope = RtemsMallocReqMalloc_Scope,
  .initial_context = &RtemsMallocReqMalloc_Instance
};
+
+static inline RtemsMallocReqMalloc_Entry RtemsMallocReqMalloc_PopEntry(
+ RtemsMallocReqMalloc_Context *ctx
+)
+{
+ size_t index;
+
+ index = ctx->Map.index;
+ ctx->Map.index = index + 1;
+ return RtemsMallocReqMalloc_Entries[
+ RtemsMallocReqMalloc_Map[ index ]
+ ];
+}
+
+static void RtemsMallocReqMalloc_TestVariant(
+ RtemsMallocReqMalloc_Context *ctx
+)
+{
+ RtemsMallocReqMalloc_Pre_Size_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+ RtemsMallocReqMalloc_Action( ctx );
+ RtemsMallocReqMalloc_Post_Status_Check( ctx, ctx->Map.entry.Post_Status );
+ RtemsMallocReqMalloc_Post_Alignment_Check(
+ ctx,
+ ctx->Map.entry.Post_Alignment
+ );
+ RtemsMallocReqMalloc_Post_Size_Check( ctx, ctx->Map.entry.Post_Size );
+}
+
+/**
+ * @fn void T_case_body_RtemsMallocReqMalloc( void )
+ */
+T_TEST_CASE_FIXTURE( RtemsMallocReqMalloc, &RtemsMallocReqMalloc_Fixture )
+{
+ RtemsMallocReqMalloc_Context *ctx;
+
+ ctx = T_fixture_context();
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+
+ for (
+ ctx->Map.pcs[ 0 ] = RtemsMallocReqMalloc_Pre_Size_Huge;
+ ctx->Map.pcs[ 0 ] < RtemsMallocReqMalloc_Pre_Size_NA;
+ ++ctx->Map.pcs[ 0 ]
+ ) {
+ ctx->Map.entry = RtemsMallocReqMalloc_PopEntry( ctx );
+ RtemsMallocReqMalloc_TestVariant( ctx );
+ }
+}
+
+/** @} */