1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
|
SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
copyrights:
- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
enabled-by: true
functional-type: action
links: []
post-conditions:
- name: Status
states:
- name: Deadlock
test-code: |
/* Checked by action */
text: |
The return status of the directive call shall be derived from
${../../status/if/deadlock:/name}.
test-epilogue: null
test-prologue: null
pre-conditions:
- name: Deadlock
states:
- name: One
test-code: |
ctx->more = false;
text: |
While the owner of the thread queue is enqueued on another thread queue
owned by the calling thread.
- name: More
test-code: |
ctx->more = true;
text: |
While the owner of the thread queue is enqueued on another thread queue
owned by a thread other than the calling thread, and so on, while the
owner of the last thread queue of this dependency chain is enqueued on a
thread queue owned by the calling thread.
test-epilogue: null
test-prologue: null
rationale: null
references: []
requirement-type: functional
skip-reasons: {}
test-action: |
/*
 * Construct a dependency cycle which is closed when the runner enqueues on
 * the thread queue under test, check that the enqueue operation returns the
 * deadlock status, and then tear the cycle down again.
 */
Status_Control status;
/* The runner owns mutex A; this makes the runner part of the cycle. */
TQMutexObtain( ctx->tq_ctx, TQ_MUTEX_A );
/*
 * NOTE(review): for the sticky enqueue variant, worker A is moved to the
 * other scheduler before it enqueues -- presumably because a sticky enqueue
 * scenario needs the queue owner to run under a different scheduler; confirm
 * against the TQ helper documentation in tx-thread-queue.h.
 */
if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
TQSetScheduler(
ctx->tq_ctx,
TQ_BLOCKER_A,
ctx->tq_ctx->other_scheduler_id,
PRIO_HIGH
);
}
/*
 * Worker A enqueues first, so that it is the thread whose blocking state
 * closes the cycle (per the pre-condition text, the owner of the thread
 * queue ends up enqueued on a queue owned by the calling thread).
 */
TQSendAndWaitForExecutionStop( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
if ( ctx->more ) {
/*
 * Longer cycle (pre-condition ``Deadlock/More``): worker B obtains mutex B,
 * worker A blocks on mutex B, and worker B blocks on mutex A, which is
 * owned by the runner.
 */
TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_B_OBTAIN );
TQSendAndWaitForExecutionStop(
ctx->tq_ctx,
TQ_BLOCKER_A,
TQ_EVENT_MUTEX_B_OBTAIN
);
TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_A_OBTAIN );
} else {
/*
 * Minimal cycle (pre-condition ``Deadlock/One``): worker A blocks directly
 * on mutex A, which is owned by the runner.
 */
TQSendAndWaitForExecutionStop(
ctx->tq_ctx,
TQ_BLOCKER_A,
TQ_EVENT_MUTEX_A_OBTAIN
);
}
/*
 * The runner now attempts to enqueue on the thread queue under test; this
 * closes the cycle and shall be detected as a deadlock.
 */
status = TQEnqueue( ctx->tq_ctx, TQ_WAIT_FOREVER );
T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_DEADLOCK ) );
/* Tear the cycle down roughly in reverse order of construction. */
TQMutexRelease( ctx->tq_ctx, TQ_MUTEX_A );
if ( ctx->more ) {
TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_A_RELEASE );
TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_B_RELEASE );
TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_MUTEX_B_RELEASE );
} else {
TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_MUTEX_A_RELEASE );
}
/*
 * Let worker A surrender the thread queue.  For the sticky variant,
 * additionally synchronize with the runner and move worker A back to the
 * runner's scheduler to restore the initial configuration.
 */
if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
TQSend(
ctx->tq_ctx,
TQ_BLOCKER_A,
TQ_EVENT_SURRENDER | TQ_EVENT_RUNNER_SYNC
);
TQSynchronizeRunner();
TQSetScheduler(
ctx->tq_ctx,
TQ_BLOCKER_A,
ctx->tq_ctx->runner_scheduler_id,
PRIO_HIGH
);
} else {
TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_SURRENDER );
}
test-brief: null
test-cleanup: null
test-context:
- brief: |
If this member is true, then more than one mutex shall be used for the
deadlock scenario.
description: null
member: |
bool more
test-context-support: null
test-description: null
test-header:
code: null
includes: []
local-includes:
- tx-thread-queue.h
run-params:
- description: |
is the thread queue context.
dir: inout
name: tq_ctx
specifier: TQContext *${.:name}
target: testsuites/validation/tr-tq-enqueue-deadlock.h
test-includes: []
test-local-includes:
- tr-tq-enqueue-deadlock.h
test-prepare: null
test-setup: null
test-stop: null
test-support: null
test-target: testsuites/validation/tr-tq-enqueue-deadlock.c
test-teardown: null
text: |
When the calling thread attempts to be enqueued on the thread queue.
transition-map:
- enabled-by: true
post-conditions:
Status: Deadlock
pre-conditions:
Deadlock: all
type: requirement
|