Diffstat (limited to 'c/src/exec/score/src')
-rw-r--r--   c/src/exec/score/src/apiext.c      105
-rw-r--r--   c/src/exec/score/src/chain.c       202
-rw-r--r--   c/src/exec/score/src/coremsg.c     432
-rw-r--r--   c/src/exec/score/src/coremutex.c   269
-rw-r--r--   c/src/exec/score/src/coresem.c     184
-rw-r--r--   c/src/exec/score/src/coretod.c     235
-rw-r--r--   c/src/exec/score/src/heap.c        537
-rw-r--r--   c/src/exec/score/src/interr.c       61
-rw-r--r--   c/src/exec/score/src/isr.c          60
-rw-r--r--   c/src/exec/score/src/mpci.c        524
-rw-r--r--   c/src/exec/score/src/object.c      512
-rw-r--r--   c/src/exec/score/src/objectmp.c    275
-rw-r--r--   c/src/exec/score/src/thread.c     1281
-rw-r--r--   c/src/exec/score/src/threadmp.c    164
-rw-r--r--   c/src/exec/score/src/threadq.c     967
-rw-r--r--   c/src/exec/score/src/tod.c         235
-rw-r--r--   c/src/exec/score/src/userext.c     204
-rw-r--r--   c/src/exec/score/src/watchdog.c    267
-rw-r--r--   c/src/exec/score/src/wkspace.c      88
19 files changed, 6602 insertions, 0 deletions
diff --git a/c/src/exec/score/src/apiext.c b/c/src/exec/score/src/apiext.c
new file mode 100644
index 0000000000..6e82b49a53
--- /dev/null
+++ b/c/src/exec/score/src/apiext.c
@@ -0,0 +1,105 @@
+/* apiext.c
+ *
+ * XXX
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+
+#include <rtems/system.h>
+#include <rtems/score/apiext.h>
+
+/*PAGE
+ *
+ * _API_extensions_Initialization
+ */
+
+void _API_extensions_Initialization( void )
+{
+ _Chain_Initialize_empty( &_API_extensions_List );
+}
+
+/*PAGE
+ *
+ * _API_extensions_Add
+ */
+
+void _API_extensions_Add(
+ API_extensions_Control *the_extension
+)
+{
+ _Chain_Append( &_API_extensions_List, &the_extension->Node );
+}
+
+/*PAGE
+ *
+ * _API_extensions_Run_predriver
+ */
+
+void _API_extensions_Run_predriver( void )
+{
+ Chain_Node *the_node;
+ API_extensions_Control *the_extension;
+
+ for ( the_node = _API_extensions_List.first ;
+ !_Chain_Is_tail( &_API_extensions_List, the_node ) ;
+ the_node = the_node->next ) {
+
+ the_extension = (API_extensions_Control *) the_node;
+
+ if ( the_extension->predriver_hook )
+ (*the_extension->predriver_hook)();
+ }
+}
+
+/*PAGE
+ *
+ * _API_extensions_Run_postdriver
+ */
+
+void _API_extensions_Run_postdriver( void )
+{
+ Chain_Node *the_node;
+ API_extensions_Control *the_extension;
+
+ for ( the_node = _API_extensions_List.first ;
+ !_Chain_Is_tail( &_API_extensions_List, the_node ) ;
+ the_node = the_node->next ) {
+
+ the_extension = (API_extensions_Control *) the_node;
+
+ if ( the_extension->postdriver_hook )
+ (*the_extension->postdriver_hook)();
+ }
+}
+
+/*PAGE
+ *
+ * _API_extensions_Run_postswitch
+ */
+
+void _API_extensions_Run_postswitch( void )
+{
+ Chain_Node *the_node;
+ API_extensions_Control *the_extension;
+
+ for ( the_node = _API_extensions_List.first ;
+ !_Chain_Is_tail( &_API_extensions_List, the_node ) ;
+ the_node = the_node->next ) {
+
+ the_extension = (API_extensions_Control *) the_node;
+
+ if ( the_extension->postswitch_hook )
+ (*the_extension->postswitch_hook)( _Thread_Executing );
+ }
+}
+
+/* end of file */
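Usage sketch (not part of the patch): one way an API layer might register an extension set so its hooks run at the predriver, postdriver, and post-switch points. Only _API_extensions_Add and the hook field names come from the code above; the function and variable names here are hypothetical.

#include <rtems/score/apiext.h>

static void my_predriver_hook( void )  { /* runs before device drivers initialize */ }
static void my_postdriver_hook( void ) { /* runs after device drivers initialize */ }

static API_extensions_Control my_extension;

void my_api_register_extension( void )
{
  my_extension.predriver_hook  = my_predriver_hook;
  my_extension.postdriver_hook = my_postdriver_hook;
  my_extension.postswitch_hook = NULL;   /* no context-switch hook in this sketch */

  _API_extensions_Add( &my_extension );  /* links my_extension.Node onto the list */
}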
diff --git a/c/src/exec/score/src/chain.c b/c/src/exec/score/src/chain.c
new file mode 100644
index 0000000000..5ef56ed5ad
--- /dev/null
+++ b/c/src/exec/score/src/chain.c
@@ -0,0 +1,202 @@
+/*
+ * Chain Handler
+ *
+ * NOTE:
+ *
+ * The routines in this file are ordered to allow proper compilation
+ * given the inlining order required by the compiler.
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/address.h>
+#include <rtems/score/chain.h>
+#include <rtems/score/isr.h>
+
+/*PAGE
+ *
+ * _Chain_Initialize
+ *
+ * This kernel routine initializes a doubly linked chain.
+ *
+ * Input parameters:
+ * the_chain - pointer to chain header
+ * starting_address - starting address of first node
+ * number_nodes - number of nodes in chain
+ * node_size - size of node in bytes
+ *
+ * Output parameters: NONE
+ */
+
+void _Chain_Initialize(
+ Chain_Control *the_chain,
+ void *starting_address,
+ unsigned32 number_nodes,
+ unsigned32 node_size
+)
+{
+ unsigned32 count;
+ Chain_Node *current;
+ Chain_Node *next;
+
+ count = number_nodes;
+ current = _Chain_Head( the_chain );
+ the_chain->permanent_null = NULL;
+ next = (Chain_Node *)starting_address;
+ while ( count-- ) {
+ current->next = next;
+ next->previous = current;
+ current = next;
+ next = (Chain_Node *)
+ _Addresses_Add_offset( (void *) next, node_size );
+ }
+ current->next = _Chain_Tail( the_chain );
+ the_chain->last = current;
+}
+
+/*PAGE
+ *
+ * _Chain_Get_first_unprotected
+ */
+
+#ifndef USE_INLINES
+Chain_Node *_Chain_Get_first_unprotected(
+ Chain_Control *the_chain
+)
+{
+ Chain_Node *return_node;
+ Chain_Node *new_first;
+
+ return_node = the_chain->first;
+ new_first = return_node->next;
+ the_chain->first = new_first;
+ new_first->previous = _Chain_Head( the_chain );
+
+ return return_node;
+}
+#endif /* USE_INLINES */
+
+/*PAGE
+ *
+ * _Chain_Get
+ *
+ * This kernel routine returns a pointer to a node taken from the
+ * given chain.
+ *
+ * Input parameters:
+ * the_chain - pointer to chain header
+ *
+ * Output parameters:
+ * return_node - pointer to the node removed from the chain
+ * NULL - if no nodes are available
+ *
+ * INTERRUPT LATENCY:
+ * only case
+ */
+
+Chain_Node *_Chain_Get(
+ Chain_Control *the_chain
+)
+{
+ ISR_Level level;
+ Chain_Node *return_node;
+
+ return_node = NULL;
+ _ISR_Disable( level );
+ if ( !_Chain_Is_empty( the_chain ) )
+ return_node = _Chain_Get_first_unprotected( the_chain );
+ _ISR_Enable( level );
+ return return_node;
+}
+
+/*PAGE
+ *
+ * _Chain_Append
+ *
+ * This kernel routine puts a node on the end of the specified chain.
+ *
+ * Input parameters:
+ * the_chain - pointer to chain header
+ * node - address of node to put at rear of chain
+ *
+ * Output parameters: NONE
+ *
+ * INTERRUPT LATENCY:
+ * only case
+ */
+
+void _Chain_Append(
+ Chain_Control *the_chain,
+ Chain_Node *node
+)
+{
+ ISR_Level level;
+
+ _ISR_Disable( level );
+ _Chain_Append_unprotected( the_chain, node );
+ _ISR_Enable( level );
+}
+
+/*PAGE
+ *
+ * _Chain_Extract
+ *
+ * This kernel routine deletes the given node from a chain.
+ *
+ * Input parameters:
+ * node - pointer to node in chain to be deleted
+ *
+ * Output parameters: NONE
+ *
+ * INTERRUPT LATENCY:
+ * only case
+ */
+
+void _Chain_Extract(
+ Chain_Node *node
+)
+{
+ ISR_Level level;
+
+ _ISR_Disable( level );
+ _Chain_Extract_unprotected( node );
+ _ISR_Enable( level );
+}
+
+/*PAGE
+ *
+ * _Chain_Insert
+ *
+ * This kernel routine inserts a given node after a specified node
+ * in a requested chain.
+ *
+ * Input parameters:
+ * after_node - pointer to node in chain to be inserted after
+ * node - pointer to node to be inserted
+ *
+ * Output parameters: NONE
+ *
+ * INTERRUPT LATENCY:
+ * only case
+ */
+
+void _Chain_Insert(
+ Chain_Node *after_node,
+ Chain_Node *node
+)
+{
+ ISR_Level level;
+
+ _ISR_Disable( level );
+ _Chain_Insert_unprotected( after_node, node );
+ _ISR_Enable( level );
+}
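Usage sketch (not part of the patch): a hedged example of carving a memory area into nodes and moving them between chains. Putting the Chain_Node first in the user structure mirrors the casts used elsewhere in this patch; the type and function names here are hypothetical.

#include <rtems/score/chain.h>

typedef struct {
  Chain_Node Node;        /* must be first so a Chain_Node * can be cast back */
  unsigned32 payload;     /* hypothetical user data */
} My_node;

static Chain_Control free_nodes;
static My_node       node_area[ 16 ];

void my_chain_example( void )
{
  Chain_Node *taken;

  /* link the 16 nodes in node_area onto the free_nodes chain */
  _Chain_Initialize( &free_nodes, node_area, 16, sizeof( My_node ) );

  taken = _Chain_Get( &free_nodes );      /* NULL when the chain is empty */
  if ( taken )
    _Chain_Append( &free_nodes, taken );  /* return it to the tail */
}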
diff --git a/c/src/exec/score/src/coremsg.c b/c/src/exec/score/src/coremsg.c
new file mode 100644
index 0000000000..05e4fb799d
--- /dev/null
+++ b/c/src/exec/score/src/coremsg.c
@@ -0,0 +1,432 @@
+/*
+ * CORE Message Queue Handler
+ *
+ * DESCRIPTION:
+ *
+ * This package is the implementation of the CORE Message Queue Handler.
+ * This core object provides task synchronization and communication functions
+ * via messages passed to queue objects.
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/chain.h>
+#include <rtems/score/isr.h>
+#include <rtems/score/object.h>
+#include <rtems/score/coremsg.h>
+#include <rtems/score/states.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/wkspace.h>
+#include <rtems/score/mpci.h>
+
+/*PAGE
+ *
+ * _CORE_message_queue_Initialize
+ *
+ * This routine initializes a newly created message queue based on the
+ * specified data.
+ *
+ * Input parameters:
+ * the_message_queue - the message queue to initialize
+ * the_class - the API specific object class
+ * the_message_queue_attributes - the message queue's attributes
+ * maximum_pending_messages - maximum message and reserved buffer count
+ * maximum_message_size - maximum size of each message
+ * proxy_extract_callout - remote extract support
+ *
+ * Output parameters:
+ * TRUE - if the message queue is initialized
+ * FALSE - if the message queue is NOT initialized
+ */
+
+boolean _CORE_message_queue_Initialize(
+ CORE_message_queue_Control *the_message_queue,
+ Objects_Classes the_class,
+ CORE_message_queue_Attributes *the_message_queue_attributes,
+ unsigned32 maximum_pending_messages,
+ unsigned32 maximum_message_size,
+ Thread_queue_Extract_callout proxy_extract_callout
+)
+{
+ unsigned32 message_buffering_required;
+ unsigned32 allocated_message_size;
+
+ the_message_queue->maximum_pending_messages = maximum_pending_messages;
+ the_message_queue->number_of_pending_messages = 0;
+ the_message_queue->maximum_message_size = maximum_message_size;
+ _CORE_message_queue_Set_notify( the_message_queue, NULL, NULL );
+
+ /*
+ * round size up to multiple of a ptr for chain init
+ */
+
+ allocated_message_size = maximum_message_size;
+ if (allocated_message_size & (sizeof(unsigned32) - 1)) {
+ allocated_message_size += sizeof(unsigned32);
+ allocated_message_size &= ~(sizeof(unsigned32) - 1);
+ }
+
+ message_buffering_required = maximum_pending_messages *
+ (allocated_message_size + sizeof(CORE_message_queue_Buffer_control));
+
+ the_message_queue->message_buffers = (CORE_message_queue_Buffer *)
+ _Workspace_Allocate( message_buffering_required );
+
+ if (the_message_queue->message_buffers == 0)
+ return FALSE;
+
+ _Chain_Initialize (
+ &the_message_queue->Inactive_messages,
+ the_message_queue->message_buffers,
+ maximum_pending_messages,
+ allocated_message_size + sizeof( CORE_message_queue_Buffer_control )
+ );
+
+ _Chain_Initialize_empty( &the_message_queue->Pending_messages );
+
+ _Thread_queue_Initialize(
+ &the_message_queue->Wait_queue,
+ the_class,
+ _CORE_message_queue_Is_priority( the_message_queue_attributes ) ?
+ THREAD_QUEUE_DISCIPLINE_PRIORITY : THREAD_QUEUE_DISCIPLINE_FIFO,
+ STATES_WAITING_FOR_MESSAGE,
+ proxy_extract_callout,
+ CORE_MESSAGE_QUEUE_STATUS_TIMEOUT
+ );
+
+ return TRUE;
+}
+
+/*PAGE
+ *
+ * _CORE_message_queue_Close
+ *
+ * This function closes a message queue by returning all allocated space
+ * and flushing the message_queue's task wait queue.
+ *
+ * Input parameters:
+ * the_message_queue - the message_queue to be flushed
+ * remote_extract_callout - function to invoke remotely
+ * status - status to pass to thread
+ *
+ * Output parameters: NONE
+ */
+
+void _CORE_message_queue_Close(
+ CORE_message_queue_Control *the_message_queue,
+ Thread_queue_Flush_callout remote_extract_callout,
+ unsigned32 status
+)
+{
+
+ if ( the_message_queue->number_of_pending_messages != 0 )
+ (void) _CORE_message_queue_Flush_support( the_message_queue );
+ else
+ _Thread_queue_Flush(
+ &the_message_queue->Wait_queue,
+ remote_extract_callout,
+ status
+ );
+
+ (void) _Workspace_Free( the_message_queue->message_buffers );
+
+}
+
+/*PAGE
+ *
+ * _CORE_message_queue_Flush
+ *
+ * This function flushes the message_queue's pending message queue. The
+ * number of messages flushed from the queue is returned.
+ *
+ * Input parameters:
+ * the_message_queue - the message_queue to be flushed
+ *
+ * Output parameters:
+ * returns - the number of messages flushed from the queue
+ */
+
+unsigned32 _CORE_message_queue_Flush(
+ CORE_message_queue_Control *the_message_queue
+)
+{
+ if ( the_message_queue->number_of_pending_messages != 0 )
+ return _CORE_message_queue_Flush_support( the_message_queue );
+ else
+ return 0;
+}
+
+/*PAGE
+ *
+ * _CORE_message_queue_Broadcast
+ *
+ * This function sends a message for every thread waiting on the queue and
+ * returns the number of threads made ready by the message.
+ *
+ * Input parameters:
+ * the_message_queue - message is submitted to this message queue
+ * buffer - pointer to message buffer
+ * size - size in bytes of message to send
+ * id - id of message queue
+ * api_message_queue_mp_support - api specific mp support callout
+ * count - area to store number of threads made ready
+ *
+ * Output parameters:
+ * count - number of threads made ready
+ * CORE_MESSAGE_QUEUE_SUCCESSFUL - if successful
+ * error code - if unsuccessful
+ */
+
+CORE_message_queue_Status _CORE_message_queue_Broadcast(
+ CORE_message_queue_Control *the_message_queue,
+ void *buffer,
+ unsigned32 size,
+ Objects_Id id,
+ CORE_message_queue_API_mp_support_callout api_message_queue_mp_support,
+ unsigned32 *count
+)
+{
+ Thread_Control *the_thread;
+ unsigned32 number_broadcasted;
+ Thread_Wait_information *waitp;
+ unsigned32 constrained_size;
+
+ number_broadcasted = 0;
+ while ((the_thread = _Thread_queue_Dequeue(&the_message_queue->Wait_queue))) {
+ waitp = &the_thread->Wait;
+ number_broadcasted += 1;
+
+ constrained_size = size;
+ if ( size > the_message_queue->maximum_message_size )
+ constrained_size = the_message_queue->maximum_message_size;
+
+ _CORE_message_queue_Copy_buffer(
+ buffer,
+ waitp->return_argument,
+ constrained_size
+ );
+
+ *(unsigned32 *)the_thread->Wait.return_argument_1 = size;
+
+ if ( !_Objects_Is_local_id( the_thread->Object.id ) )
+ (*api_message_queue_mp_support) ( the_thread, id );
+
+ }
+ *count = number_broadcasted;
+ return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
+}
+
+/*PAGE
+ *
+ * _CORE_message_queue_Seize
+ *
+ * This kernel routine dequeues a message, copies the message buffer to
+ * a given destination buffer, and frees the message buffer to the
+ * inactive message pool. The thread will be blocked if wait is TRUE,
+ * otherwise an error will be given to the thread if no messages are available.
+ *
+ * Input parameters:
+ * the_message_queue - pointer to message queue
+ * id - id of object we are waiting on
+ * buffer - pointer to message buffer to be filled
+ * size - pointer to the size of buffer to be filled
+ * wait - TRUE if wait is allowed, FALSE otherwise
+ * timeout - time to wait for a message
+ *
+ * Output parameters: NONE
+ *
+ * NOTE: Dependent on BUFFER_LENGTH
+ *
+ * INTERRUPT LATENCY:
+ * available
+ * wait
+ */
+
+void _CORE_message_queue_Seize(
+ CORE_message_queue_Control *the_message_queue,
+ Objects_Id id,
+ void *buffer,
+ unsigned32 *size,
+ boolean wait,
+ Watchdog_Interval timeout
+)
+{
+ ISR_Level level;
+ CORE_message_queue_Buffer_control *the_message;
+ Thread_Control *executing;
+
+ executing = _Thread_Executing;
+ executing->Wait.return_code = CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
+ _ISR_Disable( level );
+ if ( the_message_queue->number_of_pending_messages != 0 ) {
+ the_message_queue->number_of_pending_messages -= 1;
+
+ the_message = _CORE_message_queue_Get_pending_message( the_message_queue );
+ _ISR_Enable( level );
+ *size = the_message->Contents.size;
+ _CORE_message_queue_Copy_buffer(the_message->Contents.buffer,buffer,*size );
+ _CORE_message_queue_Free_message_buffer(the_message_queue, the_message );
+ return;
+ }
+
+ if ( !wait ) {
+ _ISR_Enable( level );
+ executing->Wait.return_code = CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED_NOWAIT;
+ return;
+ }
+
+ _Thread_queue_Enter_critical_section( &the_message_queue->Wait_queue );
+ executing->Wait.queue = &the_message_queue->Wait_queue;
+ executing->Wait.id = id;
+ executing->Wait.return_argument = (void *)buffer;
+ executing->Wait.return_argument_1 = (void *)size;
+ _ISR_Enable( level );
+
+ _Thread_queue_Enqueue( &the_message_queue->Wait_queue, timeout );
+}
+
+/*PAGE
+ *
+ * _CORE_message_queue_Flush_support
+ *
+ * This message handler routine removes all messages from a message queue
+ * and returns them to the inactive message pool. The number of messages
+ * flushed from the queue is returned.
+ *
+ * Input parameters:
+ * the_message_queue - pointer to message queue
+ *
+ * Output parameters:
+ * returns - number of messages placed on inactive chain
+ *
+ * INTERRUPT LATENCY:
+ * only case
+ */
+
+unsigned32 _CORE_message_queue_Flush_support(
+ CORE_message_queue_Control *the_message_queue
+)
+{
+ ISR_Level level;
+ Chain_Node *inactive_first;
+ Chain_Node *message_queue_first;
+ Chain_Node *message_queue_last;
+ unsigned32 count;
+
+ _ISR_Disable( level );
+ inactive_first = the_message_queue->Inactive_messages.first;
+ message_queue_first = the_message_queue->Pending_messages.first;
+ message_queue_last = the_message_queue->Pending_messages.last;
+
+ the_message_queue->Inactive_messages.first = message_queue_first;
+ message_queue_last->next = inactive_first;
+ inactive_first->previous = message_queue_last;
+ message_queue_first->previous =
+ _Chain_Head( &the_message_queue->Inactive_messages );
+
+ _Chain_Initialize_empty( &the_message_queue->Pending_messages );
+
+ count = the_message_queue->number_of_pending_messages;
+ the_message_queue->number_of_pending_messages = 0;
+ _ISR_Enable( level );
+ return count;
+}
+
+/*PAGE
+ *
+ * _CORE_message_queue_Submit
+ *
+ * This routine implements the send and urgent message functions. It
+ * processes a message that is to be submitted to the designated
+ * message queue. The message will either be processed as a
+ * send message, in which case it will be inserted at the rear of the
+ * queue, or as an urgent message, in which case it will be inserted
+ * at the front of the queue.
+ *
+ * Input parameters:
+ * the_message_queue - message is submitted to this message queue
+ * buffer - pointer to message buffer
+ * size - size in bytes of message to send
+ * id - id of message queue
+ * api_message_queue_mp_support - api specific mp support callout
+ * submit_type - send or urgent message
+ *
+ * Output parameters:
+ * CORE_MESSAGE_QUEUE_SUCCESSFUL - if successful
+ * error code - if unsuccessful
+ */
+
+CORE_message_queue_Status _CORE_message_queue_Submit(
+ CORE_message_queue_Control *the_message_queue,
+ void *buffer,
+ unsigned32 size,
+ Objects_Id id,
+ CORE_message_queue_API_mp_support_callout api_message_queue_mp_support,
+ CORE_message_queue_Submit_types submit_type
+)
+{
+ CORE_message_queue_Buffer_control *the_message;
+ Thread_Control *the_thread;
+
+ if ( size > the_message_queue->maximum_message_size )
+ return CORE_MESSAGE_QUEUE_STATUS_INVALID_SIZE;
+
+ /*
+ * Is there a thread currently waiting on this message queue?
+ */
+
+ the_thread = _Thread_queue_Dequeue( &the_message_queue->Wait_queue );
+ if ( the_thread )
+ {
+ _CORE_message_queue_Copy_buffer(
+ buffer,
+ the_thread->Wait.return_argument,
+ size
+ );
+ *(unsigned32 *)the_thread->Wait.return_argument_1 = size;
+
+ if ( !_Objects_Is_local_id( the_thread->Object.id ) )
+ (*api_message_queue_mp_support) ( the_thread, id );
+
+ return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
+ }
+
+ /*
+ * No one waiting on this one currently.
+ * Allocate a message buffer and store it away
+ */
+
+ if ( the_message_queue->number_of_pending_messages ==
+ the_message_queue->maximum_pending_messages ) {
+ return CORE_MESSAGE_QUEUE_STATUS_TOO_MANY;
+ }
+
+ the_message = _CORE_message_queue_Allocate_message_buffer(the_message_queue);
+ if ( the_message == 0)
+ return CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED;
+
+ _CORE_message_queue_Copy_buffer( buffer, the_message->Contents.buffer, size );
+ the_message->Contents.size = size;
+
+ the_message_queue->number_of_pending_messages += 1;
+
+ switch ( submit_type ) {
+ case CORE_MESSAGE_QUEUE_SEND_REQUEST:
+ _CORE_message_queue_Append( the_message_queue, the_message );
+ break;
+ case CORE_MESSAGE_QUEUE_URGENT_REQUEST:
+ _CORE_message_queue_Prepend( the_message_queue, the_message );
+ break;
+ }
+
+ return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
+}
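Usage sketch (not part of the patch): one plausible create/send/receive sequence on a single node. The attribute field name and the constant CORE_MESSAGE_QUEUE_DISCIPLINES_FIFO are assumptions made by analogy with the semaphore attributes set up in mpci.c below; everything else comes from the routines above.

#include <rtems/score/coremsg.h>

static CORE_message_queue_Control my_queue;

boolean my_queue_example( void )
{
  CORE_message_queue_Attributes attr;
  unsigned32                    in;
  unsigned32                    out;
  unsigned32                    out_size;

  /* assumed field/constant names -- see coremsg.h for the real attribute layout */
  attr.discipline = CORE_MESSAGE_QUEUE_DISCIPLINES_FIFO;

  if ( !_CORE_message_queue_Initialize(
         &my_queue, OBJECTS_NO_CLASS, &attr, 10, sizeof( unsigned32 ), NULL ) )
    return FALSE;                           /* workspace exhausted */

  in = 0x1234;
  (void) _CORE_message_queue_Submit(
    &my_queue, &in, sizeof( in ), 0, NULL, CORE_MESSAGE_QUEUE_SEND_REQUEST );

  /* no wait needed: the message just submitted is pending */
  _CORE_message_queue_Seize( &my_queue, 0, &out, &out_size, FALSE, 0 );

  return TRUE;
}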
diff --git a/c/src/exec/score/src/coremutex.c b/c/src/exec/score/src/coremutex.c
new file mode 100644
index 0000000000..a0c3fd0264
--- /dev/null
+++ b/c/src/exec/score/src/coremutex.c
@@ -0,0 +1,269 @@
+/*
+ * Mutex Handler
+ *
+ * DESCRIPTION:
+ *
+ * This package is the implementation of the Mutex Handler.
+ * This handler provides synchronization and mutual exclusion capabilities.
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/isr.h>
+#include <rtems/score/coremutex.h>
+#include <rtems/score/states.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/threadq.h>
+
+/*PAGE
+ *
+ * _CORE_mutex_Initialize
+ *
+ * This routine initializes a mutex at create time and sets the control
+ * structure according to the values passed.
+ *
+ * Input parameters:
+ * the_mutex - the mutex control block to initialize
+ * the_class - the API class of the object
+ * the_mutex_attributes - the mutex attributes specified at create time
+ * initial_lock - mutex initial lock or unlocked status
+ * proxy_extract_callout - MP specific extract callout
+ *
+ * Output parameters: NONE
+ */
+
+void _CORE_mutex_Initialize(
+ CORE_mutex_Control *the_mutex,
+ Objects_Classes the_class,
+ CORE_mutex_Attributes *the_mutex_attributes,
+ unsigned32 initial_lock,
+ Thread_queue_Extract_callout proxy_extract_callout
+)
+{
+
+/* Add this to the RTEMS environment later ?????????
+ rtems_assert( initial_lock == CORE_MUTEX_LOCKED ||
+ initial_lock == CORE_MUTEX_UNLOCKED );
+ */
+
+ the_mutex->Attributes = *the_mutex_attributes;
+ the_mutex->lock = initial_lock;
+
+ if ( initial_lock == CORE_MUTEX_LOCKED ) {
+ the_mutex->nest_count = 1;
+ the_mutex->holder = _Thread_Executing;
+ the_mutex->holder_id = _Thread_Executing->Object.id;
+ _Thread_Executing->resource_count++;
+ } else {
+ the_mutex->nest_count = 0;
+ the_mutex->holder = NULL;
+ the_mutex->holder_id = 0;
+ }
+
+ _Thread_queue_Initialize(
+ &the_mutex->Wait_queue,
+ the_class,
+ _CORE_mutex_Is_priority( the_mutex_attributes ) ?
+ THREAD_QUEUE_DISCIPLINE_PRIORITY : THREAD_QUEUE_DISCIPLINE_FIFO,
+ STATES_WAITING_FOR_MUTEX,
+ proxy_extract_callout,
+ CORE_MUTEX_TIMEOUT
+ );
+}
+
+/*PAGE
+ *
+ * _CORE_mutex_Seize
+ *
+ * This routine attempts to allocate a mutex to the calling thread.
+ *
+ * Input parameters:
+ * the_mutex - pointer to mutex control block
+ * id - id of object to wait on
+ * wait - TRUE if wait is allowed, FALSE otherwise
+ * timeout - number of ticks to wait (0 means forever)
+ *
+ * Output parameters: NONE
+ *
+ * INTERRUPT LATENCY:
+ * available
+ * wait
+ */
+
+void _CORE_mutex_Seize(
+ CORE_mutex_Control *the_mutex,
+ Objects_Id id,
+ boolean wait,
+ Watchdog_Interval timeout
+)
+{
+ Thread_Control *executing;
+ ISR_Level level;
+
+ executing = _Thread_Executing;
+ executing->Wait.return_code = CORE_MUTEX_STATUS_SUCCESSFUL;
+ _ISR_Disable( level );
+ if ( ! _CORE_mutex_Is_locked( the_mutex ) ) {
+ the_mutex->lock = CORE_MUTEX_LOCKED;
+ the_mutex->holder = executing;
+ the_mutex->holder_id = executing->Object.id;
+ the_mutex->nest_count = 1;
+ executing->resource_count++;
+ _ISR_Enable( level );
+ return;
+ }
+
+ if ( _Objects_Are_ids_equal(
+ _Thread_Executing->Object.id, the_mutex->holder_id ) ) {
+ if ( _CORE_mutex_Is_nesting_allowed( &the_mutex->Attributes ) )
+ the_mutex->nest_count++;
+ else
+ executing->Wait.return_code = CORE_MUTEX_STATUS_NESTING_NOT_ALLOWED;
+
+ _ISR_Enable( level );
+ return;
+ }
+
+ if ( !wait ) {
+ _ISR_Enable( level );
+ executing->Wait.return_code = CORE_MUTEX_STATUS_UNSATISFIED_NOWAIT;
+ return;
+ }
+
+ _Thread_queue_Enter_critical_section( &the_mutex->Wait_queue );
+ executing->Wait.queue = &the_mutex->Wait_queue;
+ executing->Wait.id = id;
+ _ISR_Enable( level );
+
+ if ( _CORE_mutex_Is_inherit_priority( &the_mutex->Attributes ) &&
+ the_mutex->holder->current_priority >
+ _Thread_Executing->current_priority ) {
+ _Thread_Change_priority(
+ the_mutex->holder, _Thread_Executing->current_priority );
+ }
+
+ _Thread_queue_Enqueue( &the_mutex->Wait_queue, timeout );
+}
+
+/*
+ * _CORE_mutex_Surrender
+ *
+ * DESCRIPTION:
+ *
+ * This routine releases the mutex. If a task was blocked waiting for
+ * the mutex, then that task will be readied and made the new holder.
+ * Otherwise, the mutex will be marked unlocked.
+ *
+ * Input parameters:
+ * the_mutex - the mutex to be surrendered
+ * id - id of parent mutex
+ * api_mutex_mp_support - api dependent MP support actions
+ *
+ * Output parameters:
+ * CORE_MUTEX_STATUS_SUCCESSFUL - if successful
+ * core error code - if unsuccessful
+ */
+
+CORE_mutex_Status _CORE_mutex_Surrender(
+ CORE_mutex_Control *the_mutex,
+ Objects_Id id,
+ CORE_mutex_API_mp_support_callout api_mutex_mp_support
+)
+{
+ Thread_Control *the_thread;
+
+ if ( !_Objects_Are_ids_equal(
+ _Thread_Executing->Object.id, the_mutex->holder_id ) )
+ return( CORE_MUTEX_STATUS_NOT_OWNER_OF_RESOURCE );
+
+ the_mutex->nest_count--;
+
+ if ( the_mutex->nest_count != 0 )
+ return( CORE_MUTEX_STATUS_SUCCESSFUL );
+
+ _Thread_Executing->resource_count--;
+ the_mutex->holder = NULL;
+ the_mutex->holder_id = 0;
+
+ /*
+ * Whether or not someone is waiting for the mutex, an
+ * inherited priority must be lowered if this is the last
+ * mutex (i.e. resource) this task has.
+ */
+
+ if ( _CORE_mutex_Is_inherit_priority( &the_mutex->Attributes ) &&
+ _Thread_Executing->resource_count == 0 &&
+ _Thread_Executing->real_priority !=
+ _Thread_Executing->current_priority ) {
+ _Thread_Change_priority(
+ _Thread_Executing,
+ _Thread_Executing->real_priority
+ );
+ }
+
+ if ( ( the_thread = _Thread_queue_Dequeue( &the_mutex->Wait_queue ) ) ) {
+
+ if ( !_Objects_Is_local_id( the_thread->Object.id ) ) {
+
+ the_mutex->holder = NULL;
+ the_mutex->holder_id = the_thread->Object.id;
+ the_mutex->nest_count = 1;
+
+ ( *api_mutex_mp_support)( the_thread, id );
+
+ } else {
+
+ the_mutex->holder = the_thread;
+ the_mutex->holder_id = the_thread->Object.id;
+ the_thread->resource_count++;
+ the_mutex->nest_count = 1;
+
+ /*
+ * No special action for priority inheritance because the_thread
+ * is guaranteed to be the highest priority thread waiting for
+ * the mutex.
+ */
+ }
+ } else
+ the_mutex->lock = CORE_MUTEX_UNLOCKED;
+
+ return( CORE_MUTEX_STATUS_SUCCESSFUL );
+}
+
+/*PAGE
+ *
+ * _CORE_mutex_Flush
+ *
+ * This function flushes the mutex's task wait queue.
+ *
+ * Input parameters:
+ * the_mutex - the mutex to be flushed
+ * remote_extract_callout - function to invoke remotely
+ * status - status to pass to thread
+ *
+ * Output parameters: NONE
+ */
+
+void _CORE_mutex_Flush(
+ CORE_mutex_Control *the_mutex,
+ Thread_queue_Flush_callout remote_extract_callout,
+ unsigned32 status
+)
+{
+
+ _Thread_queue_Flush(
+ &the_mutex->Wait_queue,
+ remote_extract_callout,
+ status
+ );
+
+}
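Usage sketch (not part of the patch): a hedged lock/unlock sequence built only from the routines above. The attribute structure's fields are not visible in this patch, so they are left to coremutex.h; the function name and id are illustrative.

#include <rtems/score/coremutex.h>

static CORE_mutex_Control    my_mutex;
static CORE_mutex_Attributes my_mutex_attr;   /* fields set per coremutex.h (not shown here) */

void my_mutex_example( Objects_Id id )
{
  /* create the mutex unlocked; no MP extract callout */
  _CORE_mutex_Initialize(
    &my_mutex, OBJECTS_NO_CLASS, &my_mutex_attr, CORE_MUTEX_UNLOCKED, NULL );

  _CORE_mutex_Seize( &my_mutex, id, TRUE, 0 );   /* block forever if contended */

  if ( _Thread_Executing->Wait.return_code == CORE_MUTEX_STATUS_SUCCESSFUL ) {
    /* ... critical section ... */
    (void) _CORE_mutex_Surrender( &my_mutex, id, NULL );
  }
}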
diff --git a/c/src/exec/score/src/coresem.c b/c/src/exec/score/src/coresem.c
new file mode 100644
index 0000000000..0e55926304
--- /dev/null
+++ b/c/src/exec/score/src/coresem.c
@@ -0,0 +1,184 @@
+/*
+ * CORE Semaphore Handler
+ *
+ * DESCRIPTION:
+ *
+ * This package is the implementation of the CORE Semaphore Handler.
+ * This core object utilizes standard Dijkstra counting semaphores to provide
+ * synchronization and mutual exclusion capabilities.
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/isr.h>
+#include <rtems/score/coresem.h>
+#include <rtems/score/states.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/threadq.h>
+#include <rtems/score/mpci.h>
+
+/*PAGE
+ *
+ * CORE_semaphore_Initialize
+ *
+ * This function initializes a semaphore and sets its initial count to
+ * the given value.
+ *
+ * Input parameters:
+ * the_semaphore - the semaphore control block to initialize
+ * the_class - the API class of the object
+ * the_semaphore_attributes - the attributes specified at create time
+ * initial_value - semaphore's initial value
+ * proxy_extract_callout - MP specific extract callout
+ *
+ * Output parameters: NONE
+ */
+
+void _CORE_semaphore_Initialize(
+ CORE_semaphore_Control *the_semaphore,
+ Objects_Classes the_class,
+ CORE_semaphore_Attributes *the_semaphore_attributes,
+ unsigned32 initial_value,
+ Thread_queue_Extract_callout proxy_extract_callout
+)
+{
+
+ the_semaphore->Attributes = *the_semaphore_attributes;
+ the_semaphore->count = initial_value;
+
+ _Thread_queue_Initialize(
+ &the_semaphore->Wait_queue,
+ the_class,
+ _CORE_semaphore_Is_priority( the_semaphore_attributes ) ?
+ THREAD_QUEUE_DISCIPLINE_PRIORITY : THREAD_QUEUE_DISCIPLINE_FIFO,
+ STATES_WAITING_FOR_SEMAPHORE,
+ proxy_extract_callout,
+ CORE_SEMAPHORE_TIMEOUT
+ );
+}
+
+/*PAGE
+ *
+ * _CORE_semaphore_Surrender
+ *
+ * This routine surrenders a unit to the semaphore. If a thread was
+ * blocked waiting for a unit, that thread is readied and given the
+ * unit; otherwise the semaphore's count is incremented.
+ *
+ * Input parameters:
+ * the_semaphore - the semaphore to surrender a unit to
+ * id - id of parent semaphore
+ * api_semaphore_mp_support - api dependent MP support actions
+ *
+ * Output parameters:
+ * CORE_SEMAPHORE_STATUS_SUCCESSFUL - if successful
+ * core error code - if unsuccessful
+ */
+
+CORE_semaphore_Status _CORE_semaphore_Surrender(
+ CORE_semaphore_Control *the_semaphore,
+ Objects_Id id,
+ CORE_semaphore_API_mp_support_callout api_semaphore_mp_support
+)
+{
+ Thread_Control *the_thread;
+
+ if ( (the_thread = _Thread_queue_Dequeue(&the_semaphore->Wait_queue)) ) {
+
+ if ( !_Objects_Is_local_id( the_thread->Object.id ) )
+ (*api_semaphore_mp_support) ( the_thread, id );
+
+ } else
+ the_semaphore->count += 1;
+
+ return( CORE_SEMAPHORE_STATUS_SUCCESSFUL );
+}
+
+/*PAGE
+ *
+ * _CORE_semaphore_Seize
+ *
+ * This routine attempts to allocate a core semaphore to the calling thread.
+ *
+ * Input parameters:
+ * the_semaphore - pointer to semaphore control block
+ * id - id of object to wait on
+ * wait - TRUE if wait is allowed, FALSE otherwise
+ * timeout - number of ticks to wait (0 means forever)
+ *
+ * Output parameters: NONE
+ *
+ * INTERRUPT LATENCY:
+ * available
+ * wait
+ */
+
+void _CORE_semaphore_Seize(
+ CORE_semaphore_Control *the_semaphore,
+ Objects_Id id,
+ boolean wait,
+ Watchdog_Interval timeout
+)
+{
+ Thread_Control *executing;
+ ISR_Level level;
+
+ executing = _Thread_Executing;
+ executing->Wait.return_code = CORE_SEMAPHORE_STATUS_SUCCESSFUL;
+ _ISR_Disable( level );
+ if ( the_semaphore->count != 0 ) {
+ the_semaphore->count -= 1;
+ _ISR_Enable( level );
+ return;
+ }
+
+ if ( !wait ) {
+ _ISR_Enable( level );
+ executing->Wait.return_code = CORE_SEMAPHORE_STATUS_UNSATISFIED_NOWAIT;
+ return;
+ }
+
+ _Thread_queue_Enter_critical_section( &the_semaphore->Wait_queue );
+ executing->Wait.queue = &the_semaphore->Wait_queue;
+ executing->Wait.id = id;
+ _ISR_Enable( level );
+
+ _Thread_queue_Enqueue( &the_semaphore->Wait_queue, timeout );
+}
+
+
+/*PAGE
+ *
+ * _CORE_semaphore_Flush
+ *
+ * This function flushes the semaphore's task wait queue.
+ *
+ * Input parameters:
+ * the_semaphore - the semaphore to be flushed
+ * remote_extract_callout - function to invoke remotely
+ * status - status to pass to thread
+ *
+ * Output parameters: NONE
+ */
+
+void _CORE_semaphore_Flush(
+ CORE_semaphore_Control *the_semaphore,
+ Thread_queue_Flush_callout remote_extract_callout,
+ unsigned32 status
+)
+{
+
+ _Thread_queue_Flush(
+ &the_semaphore->Wait_queue,
+ remote_extract_callout,
+ status
+ );
+
+}
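Usage sketch (not part of the patch): a binary-semaphore style obtain/release pair. The attribute setup copies the FIFO discipline assignment used for the MPCI receive semaphore in mpci.c below; the function name and id are hypothetical.

#include <rtems/score/coresem.h>

static CORE_semaphore_Control my_sem;

void my_semaphore_example( Objects_Id id )
{
  CORE_semaphore_Attributes attr;

  attr.discipline = CORE_SEMAPHORE_DISCIPLINES_FIFO;   /* same setup as in mpci.c */

  /* one initial unit, so the first seize succeeds immediately */
  _CORE_semaphore_Initialize( &my_sem, OBJECTS_NO_CLASS, &attr, 1, NULL );

  _CORE_semaphore_Seize( &my_sem, id, TRUE, 0 );            /* obtain (may block) */
  /* ... protected work ... */
  (void) _CORE_semaphore_Surrender( &my_sem, id, NULL );    /* release */
}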
diff --git a/c/src/exec/score/src/coretod.c b/c/src/exec/score/src/coretod.c
new file mode 100644
index 0000000000..1a11034ceb
--- /dev/null
+++ b/c/src/exec/score/src/coretod.c
@@ -0,0 +1,235 @@
+/*
+ * Time of Day (TOD) Handler
+ *
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/object.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/tod.h>
+#include <rtems/score/watchdog.h>
+
+/*PAGE
+ *
+ * _TOD_Handler_initialization
+ *
+ * This routine initializes the time of day handler.
+ *
+ * Input parameters:
+ * microseconds_per_tick - microseconds between clock ticks
+ *
+ * Output parameters: NONE
+ */
+
+void _TOD_Handler_initialization(
+ unsigned32 microseconds_per_tick
+)
+{
+ _TOD_Microseconds_per_tick = microseconds_per_tick;
+
+ _TOD_Ticks_since_boot = 0;
+ _TOD_Seconds_since_epoch = 0;
+
+ _TOD_Current.year = TOD_BASE_YEAR;
+ _TOD_Current.month = 1;
+ _TOD_Current.day = 1;
+ _TOD_Current.hour = 0;
+ _TOD_Current.minute = 0;
+ _TOD_Current.second = 0;
+ _TOD_Current.ticks = 0;
+
+ if ( microseconds_per_tick == 0 )
+ _TOD_Ticks_per_second = 0;
+ else
+ _TOD_Ticks_per_second =
+ TOD_MICROSECONDS_PER_SECOND / microseconds_per_tick;
+
+ _Watchdog_Initialize( &_TOD_Seconds_watchdog, _TOD_Tickle, 0, NULL );
+}
+
+/*PAGE
+ *
+ * _TOD_Set
+ *
+ * This routine sets the current date and time to the specified
+ * new date and time structure.
+ *
+ * Input parameters:
+ * the_tod - pointer to the time and date structure
+ * seconds_since_epoch - seconds since system epoch
+ *
+ * Output parameters: NONE
+ */
+
+void _TOD_Set(
+ TOD_Control *the_tod,
+ Watchdog_Interval seconds_since_epoch
+)
+{
+ Watchdog_Interval ticks_until_next_second;
+
+ _Thread_Disable_dispatch();
+ _TOD_Deactivate();
+
+ if ( seconds_since_epoch < _TOD_Seconds_since_epoch )
+ _Watchdog_Adjust_seconds( WATCHDOG_BACKWARD,
+ _TOD_Seconds_since_epoch - seconds_since_epoch );
+ else
+ _Watchdog_Adjust_seconds( WATCHDOG_FORWARD,
+ seconds_since_epoch - _TOD_Seconds_since_epoch );
+
+ ticks_until_next_second = _TOD_Ticks_per_second;
+ if ( ticks_until_next_second > _TOD_Current.ticks )
+ ticks_until_next_second -= _TOD_Current.ticks;
+
+ _TOD_Current = *the_tod;
+ _TOD_Seconds_since_epoch = seconds_since_epoch;
+ _TOD_Activate( ticks_until_next_second );
+
+ _Thread_Enable_dispatch();
+}
+
+/*PAGE
+ *
+ * _TOD_Validate
+ *
+ * This kernel routine checks the validity of a date and time structure.
+ *
+ * Input parameters:
+ * the_tod - pointer to a time and date structure
+ *
+ * Output parameters:
+ * TRUE - if the date, time, and tick are valid
+ * FALSE - if the_tod is invalid
+ *
+ * NOTE: This routine only works for leap-years through 2099.
+ */
+
+boolean _TOD_Validate(
+ TOD_Control *the_tod
+)
+{
+ unsigned32 days_in_month;
+
+ if ((the_tod->ticks >= _TOD_Ticks_per_second) ||
+ (the_tod->second >= TOD_SECONDS_PER_MINUTE) ||
+ (the_tod->minute >= TOD_MINUTES_PER_HOUR) ||
+ (the_tod->hour >= TOD_HOURS_PER_DAY) ||
+ (the_tod->month == 0) ||
+ (the_tod->month > TOD_MONTHS_PER_YEAR) ||
+ (the_tod->year < TOD_BASE_YEAR) ||
+ (the_tod->day == 0) )
+ return FALSE;
+
+ if ( (the_tod->year % 4) == 0 )
+ days_in_month = _TOD_Days_per_month[ 1 ][ the_tod->month ];
+ else
+ days_in_month = _TOD_Days_per_month[ 0 ][ the_tod->month ];
+
+ if ( the_tod->day > days_in_month )
+ return FALSE;
+
+ return TRUE;
+}
+
+/*PAGE
+ *
+ * _TOD_To_seconds
+ *
+ * This routine returns the seconds from the epoch until the
+ * current date and time.
+ *
+ * Input parameters:
+ * the_tod - pointer to the time and date structure
+ *
+ * Output parameters:
+ * returns - seconds since epoch until the_tod
+ */
+
+unsigned32 _TOD_To_seconds(
+ TOD_Control *the_tod
+)
+{
+ unsigned32 time;
+ unsigned32 year_mod_4;
+
+ time = the_tod->day - 1;
+ year_mod_4 = the_tod->year & 3;
+
+ if ( year_mod_4 == 0 )
+ time += _TOD_Days_to_date[ 1 ][ the_tod->month ];
+ else
+ time += _TOD_Days_to_date[ 0 ][ the_tod->month ];
+
+ time += ( (the_tod->year - TOD_BASE_YEAR) / 4 ) *
+ ( (TOD_DAYS_PER_YEAR * 4) + 1);
+
+ time += _TOD_Days_since_last_leap_year[ year_mod_4 ];
+
+ time *= TOD_SECONDS_PER_DAY;
+
+ time += ((the_tod->hour * TOD_MINUTES_PER_HOUR) + the_tod->minute)
+ * TOD_SECONDS_PER_MINUTE;
+
+ time += the_tod->second;
+
+ return( time );
+}
+
+/*PAGE
+ *
+ * _TOD_Tickle
+ *
+ * This routine updates the calendar time and tickles the
+ * per second watchdog timer chain.
+ *
+ * Input parameters:
+ * ignored - this parameter is ignored
+ *
+ * Output parameters: NONE
+ *
+ * NOTE: This routine only works for leap-years through 2099.
+ */
+
+void _TOD_Tickle(
+ Objects_Id id,
+ void *ignored
+)
+{
+ unsigned32 leap;
+
+ _TOD_Current.ticks = 0;
+ ++_TOD_Seconds_since_epoch;
+ if ( ++_TOD_Current.second >= TOD_SECONDS_PER_MINUTE ) {
+ _TOD_Current.second = 0;
+ if ( ++_TOD_Current.minute >= TOD_MINUTES_PER_HOUR ) {
+ _TOD_Current.minute = 0;
+ if ( ++_TOD_Current.hour >= TOD_HOURS_PER_DAY ) {
+ _TOD_Current.hour = 0;
+ if ( _TOD_Current.year & 0x3 ) leap = 0;
+ else leap = 1;
+ if ( ++_TOD_Current.day >
+ _TOD_Days_per_month[ leap ][ _TOD_Current.month ]) {
+ _TOD_Current.day = 1;
+ if ( ++_TOD_Current.month > TOD_MONTHS_PER_YEAR ) {
+ _TOD_Current.month = 1;
+ _TOD_Current.year++;
+ }
+ }
+ }
+ }
+ }
+
+ _Watchdog_Tickle_seconds();
+ _Watchdog_Insert_ticks( &_TOD_Seconds_watchdog, _TOD_Ticks_per_second );
+}
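Usage sketch (not part of the patch): validating a date, converting it to seconds since the epoch, and installing it, using only the routines above. The chosen date and the function name are arbitrary.

#include <rtems/score/tod.h>

void my_set_time_example( void )
{
  TOD_Control       the_tod;
  Watchdog_Interval seconds;

  the_tod.year   = 1994;        /* must be >= TOD_BASE_YEAR */
  the_tod.month  = 7;
  the_tod.day    = 4;
  the_tod.hour   = 12;
  the_tod.minute = 30;
  the_tod.second = 0;
  the_tod.ticks  = 0;

  if ( _TOD_Validate( &the_tod ) ) {
    seconds = _TOD_To_seconds( &the_tod );
    _TOD_Set( &the_tod, seconds );
  }
}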
diff --git a/c/src/exec/score/src/heap.c b/c/src/exec/score/src/heap.c
new file mode 100644
index 0000000000..11d8a31cee
--- /dev/null
+++ b/c/src/exec/score/src/heap.c
@@ -0,0 +1,537 @@
+/*
+ * Heap Handler
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+
+#include <rtems/system.h>
+#include <rtems/score/sysstate.h>
+#include <rtems/score/heap.h>
+
+/*PAGE
+ *
+ * _Heap_Initialize
+ *
+ * This kernel routine initializes a heap.
+ *
+ * Input parameters:
+ * the_heap - pointer to heap header
+ * starting_address - starting address of heap
+ * size - size of heap
+ * page_size - allocatable unit of memory
+ *
+ * Output parameters:
+ * returns - maximum memory available if initialization succeeded
+ * 0 - otherwise
+ *
+ * This is what a heap looks like in memory immediately
+ * after initialization:
+ *
+ * +--------------------------------+
+ * 0 | size = 0 | status = used | a.k.a. dummy back flag
+ * +--------------------------------+
+ * 4 | size = size-8 | status = free | a.k.a. front flag
+ * +--------------------------------+
+ * 8 | next = PERM HEAP_TAIL |
+ * +--------------------------------+
+ * 12 | previous = PERM HEAP_HEAD |
+ * +--------------------------------+
+ * | |
+ * | memory available |
+ * | for allocation |
+ * | |
+ * +--------------------------------+
+ * size - 8 | size = size-8 | status = free | a.k.a. back flag
+ * +--------------------------------+
+ * size - 4 | size = 0 | status = used | a.k.a. dummy front flag
+ * +--------------------------------+
+ */
+
+unsigned32 _Heap_Initialize(
+ Heap_Control *the_heap,
+ void *starting_address,
+ unsigned32 size,
+ unsigned32 page_size
+)
+{
+ Heap_Block *the_block;
+ unsigned32 the_size;
+
+ if ( !_Heap_Is_page_size_valid( page_size ) ||
+ (size < HEAP_MINIMUM_SIZE) )
+ return 0;
+
+ the_heap->page_size = page_size;
+ the_size = size - HEAP_OVERHEAD;
+
+ the_block = (Heap_Block *) starting_address;
+ the_block->back_flag = HEAP_DUMMY_FLAG;
+ the_block->front_flag = the_size;
+ the_block->next = _Heap_Tail( the_heap );
+ the_block->previous = _Heap_Head( the_heap );
+
+ the_heap->start = the_block;
+ the_heap->first = the_block;
+ the_heap->permanent_null = NULL;
+ the_heap->last = the_block;
+
+ the_block = _Heap_Next_block( the_block );
+ the_block->back_flag = the_size;
+ the_block->front_flag = HEAP_DUMMY_FLAG;
+ the_heap->final = the_block;
+
+ return ( the_size - HEAP_BLOCK_USED_OVERHEAD );
+}
+
+/*PAGE
+ *
+ * _Heap_Extend
+ *
+ * This routine grows the_heap memory area using the size bytes which
+ * begin at starting_address.
+ *
+ * Input parameters:
+ * the_heap - pointer to heap header.
+ * starting_address - pointer to the memory area.
+ * size - size in bytes of the memory area to add to the heap.
+ *
+ * Output parameters:
+ * *amount_extended - amount of memory added to the_heap
+ */
+
+Heap_Extend_status _Heap_Extend(
+ Heap_Control *the_heap,
+ void *starting_address,
+ unsigned32 size,
+ unsigned32 *amount_extended
+)
+{
+ Heap_Block *the_block;
+ unsigned32 *p;
+
+ /*
+ * The overhead was taken from the original heap memory.
+ */
+
+ Heap_Block *old_final;
+ Heap_Block *new_final;
+
+ /*
+ * There are five possibilities for the location of starting
+ * address:
+ *
+ * 1. non-contiguous lower address (NOT SUPPORTED)
+ * 2. contiguous lower address (NOT SUPPORTED)
+ * 3. in the heap (ERROR)
+ * 4. contiguous higher address (SUPPORTED)
+ * 5. non-contiguous higher address (NOT SUPPORTED)
+ *
+ * As noted, this code only supports (4).
+ */
+
+ if ( starting_address >= (void *) the_heap->start && /* case 3 */
+ starting_address <= (void *) the_heap->final
+ )
+ return HEAP_EXTEND_ERROR;
+
+ if ( starting_address < (void *) the_heap->start ) { /* cases 1 and 2 */
+
+ return HEAP_EXTEND_NOT_IMPLEMENTED; /* cases 1 and 2 */
+
+ } else { /* cases 4 and 5 */
+
+ the_block = (Heap_Block *) (starting_address - HEAP_OVERHEAD);
+ if ( the_block != the_heap->final )
+ return HEAP_EXTEND_NOT_IMPLEMENTED; /* case 5 */
+ }
+
+ /*
+ * Currently only case 4 should make it to this point.
+ * The basic trick is to make the extend area look like a used
+ * block and free it.
+ */
+
+ *amount_extended = size;
+
+ old_final = the_heap->final;
+ new_final = _Addresses_Add_offset( old_final, size );
+ /* SAME AS: _Addresses_Add_offset( starting_address, size-HEAP_OVERHEAD ); */
+
+ the_heap->final = new_final;
+
+ old_final->front_flag =
+ new_final->back_flag = _Heap_Build_flag( size, HEAP_BLOCK_USED );
+ new_final->front_flag = HEAP_DUMMY_FLAG;
+
+ /*
+ * Must pass in address of "user" area
+ * So add in the offset field.
+ */
+
+ p = (unsigned32 *) &old_final->next;
+ *p = sizeof(unsigned32);
+ p++;
+ _Heap_Free( the_heap, p );
+
+ return HEAP_EXTEND_SUCCESSFUL;
+}
+
+/*PAGE
+ *
+ * _Heap_Allocate
+ *
+ * This kernel routine allocates the requested size of memory
+ * from the specified heap.
+ *
+ * Input parameters:
+ * the_heap - pointer to heap header.
+ * size - size in bytes of the memory block to allocate.
+ *
+ * Output parameters:
+ * returns - starting address of memory block allocated
+ */
+
+void *_Heap_Allocate(
+ Heap_Control *the_heap,
+ unsigned32 size
+)
+{
+ unsigned32 excess;
+ unsigned32 the_size;
+ Heap_Block *the_block;
+ Heap_Block *next_block;
+ Heap_Block *temporary_block;
+ void *ptr;
+ unsigned32 offset;
+
+ excess = size % the_heap->page_size;
+ the_size = size + the_heap->page_size + HEAP_BLOCK_USED_OVERHEAD;
+
+ if ( excess )
+ the_size += the_heap->page_size - excess;
+
+ if ( the_size < sizeof( Heap_Block ) )
+ the_size = sizeof( Heap_Block );
+
+ for ( the_block = the_heap->first;
+ ;
+ the_block = the_block->next ) {
+ if ( the_block == _Heap_Tail( the_heap ) )
+ return( NULL );
+ if ( the_block->front_flag >= the_size )
+ break;
+ }
+
+ if ( (the_block->front_flag - the_size) >
+ (the_heap->page_size + HEAP_BLOCK_USED_OVERHEAD) ) {
+ the_block->front_flag -= the_size;
+ next_block = _Heap_Next_block( the_block );
+ next_block->back_flag = the_block->front_flag;
+
+ temporary_block = _Heap_Block_at( next_block, the_size );
+ temporary_block->back_flag =
+ next_block->front_flag = _Heap_Build_flag( the_size,
+ HEAP_BLOCK_USED );
+ ptr = _Heap_Start_of_user_area( next_block );
+ } else {
+ next_block = _Heap_Next_block( the_block );
+ next_block->back_flag = _Heap_Build_flag( the_block->front_flag,
+ HEAP_BLOCK_USED );
+ the_block->front_flag = next_block->back_flag;
+ the_block->next->previous = the_block->previous;
+ the_block->previous->next = the_block->next;
+ ptr = _Heap_Start_of_user_area( the_block );
+ }
+
+ /*
+ * round ptr up to a multiple of page size
+ * Have to save the bump amount in the buffer so that free can figure it out
+ */
+
+ offset = the_heap->page_size - (((unsigned32) ptr) & (the_heap->page_size - 1));
+ ptr += offset;
+ *(((unsigned32 *) ptr) - 1) = offset;
+
+#ifdef RTEMS_DEBUG
+ {
+ unsigned32 ptr_u32;
+ ptr_u32 = (unsigned32) ptr;
+ if (ptr_u32 & (the_heap->page_size - 1))
+ abort();
+ }
+#endif
+
+ return ptr;
+}
+
+/*PAGE
+ *
+ * _Heap_Size_of_user_area
+ *
+ * This kernel routine returns the size of the user memory area
+ * of the given heap block.
+ *
+ * Input parameters:
+ * the_heap - pointer to heap header
+ * starting_address - starting address of the memory block to free.
+ * size - pointer to size of area
+ *
+ * Output parameters:
+ * size - size of area filled in
+ * TRUE - if starting_address is valid heap address
+ * FALSE - if starting_address is invalid heap address
+ */
+
+boolean _Heap_Size_of_user_area(
+ Heap_Control *the_heap,
+ void *starting_address,
+ unsigned32 *size
+)
+{
+ Heap_Block *the_block;
+ Heap_Block *next_block;
+ unsigned32 the_size;
+
+ the_block = _Heap_User_block_at( starting_address );
+
+ if ( !_Heap_Is_block_in( the_heap, the_block ) ||
+ _Heap_Is_block_free( the_block ) )
+ return( FALSE );
+
+ the_size = _Heap_Block_size( the_block );
+ next_block = _Heap_Block_at( the_block, the_size );
+
+ if ( !_Heap_Is_block_in( the_heap, next_block ) ||
+ (the_block->front_flag != next_block->back_flag) )
+ return( FALSE );
+
+ *size = the_size;
+ return( TRUE );
+}
+
+/*PAGE
+ *
+ * _Heap_Free
+ *
+ * This kernel routine returns the memory designated by the
+ * given heap and given starting address to the memory pool.
+ *
+ * Input parameters:
+ * the_heap - pointer to heap header
+ * starting_address - starting address of the memory block to free.
+ *
+ * Output parameters:
+ * TRUE - if starting_address is valid heap address
+ * FALSE - if starting_address is invalid heap address
+ */
+
+boolean _Heap_Free(
+ Heap_Control *the_heap,
+ void *starting_address
+)
+{
+ Heap_Block *the_block;
+ Heap_Block *next_block;
+ Heap_Block *new_next_block;
+ Heap_Block *previous_block;
+ Heap_Block *temporary_block;
+ unsigned32 the_size;
+
+ the_block = _Heap_User_block_at( starting_address );
+
+ if ( !_Heap_Is_block_in( the_heap, the_block ) ||
+ _Heap_Is_block_free( the_block ) ) {
+ return( FALSE );
+ }
+
+ the_size = _Heap_Block_size( the_block );
+ next_block = _Heap_Block_at( the_block, the_size );
+
+ if ( !_Heap_Is_block_in( the_heap, next_block ) ||
+ (the_block->front_flag != next_block->back_flag) ) {
+ return( FALSE );
+ }
+
+ if ( _Heap_Is_previous_block_free( the_block ) ) {
+ previous_block = _Heap_Previous_block( the_block );
+
+ if ( !_Heap_Is_block_in( the_heap, previous_block ) ) {
+ return( FALSE );
+ }
+
+ if ( _Heap_Is_block_free( next_block ) ) { /* coalesce both */
+ previous_block->front_flag += next_block->front_flag + the_size;
+ temporary_block = _Heap_Next_block( previous_block );
+ temporary_block->back_flag = previous_block->front_flag;
+ next_block->next->previous = next_block->previous;
+ next_block->previous->next = next_block->next;
+ }
+ else { /* coalesce prev */
+ previous_block->front_flag =
+ next_block->back_flag = previous_block->front_flag + the_size;
+ }
+ }
+ else if ( _Heap_Is_block_free( next_block ) ) { /* coalesce next */
+ the_block->front_flag = the_size + next_block->front_flag;
+ new_next_block = _Heap_Next_block( the_block );
+ new_next_block->back_flag = the_block->front_flag;
+ the_block->next = next_block->next;
+ the_block->previous = next_block->previous;
+ next_block->previous->next = the_block;
+ next_block->next->previous = the_block;
+
+ if (the_heap->first == next_block)
+ the_heap->first = the_block;
+ }
+ else { /* no coalesce */
+ next_block->back_flag =
+ the_block->front_flag = the_size;
+ the_block->previous = _Heap_Head( the_heap );
+ the_block->next = the_heap->first;
+ the_heap->first = the_block;
+ the_block->next->previous = the_block;
+ }
+
+ return( TRUE );
+}
+
+/*PAGE
+ *
+ * _Heap_Walk
+ *
+ * This kernel routine walks the heap and verifies its correctness.
+ *
+ * Input parameters:
+ * the_heap - pointer to heap header
+ * source - a numeric indicator of the invoker of this routine
+ * do_dump - when TRUE print the information
+ *
+ * Output parameters: NONE
+ */
+
+#ifndef RTEMS_DEBUG
+
+void _Heap_Walk(
+ Heap_Control *the_heap,
+ int source,
+ boolean do_dump
+)
+{
+}
+
+#else
+
+#include <stdio.h>
+#include <unistd.h>
+
+void _Heap_Walk(
+ Heap_Control *the_heap,
+ int source,
+ boolean do_dump
+)
+{
+ Heap_Block *the_block = 0; /* avoid warnings */
+ Heap_Block *next_block = 0; /* avoid warnings */
+ int notdone = 1;
+ int error = 0;
+ int passes = 0;
+
+ /*
+ * We don't want to allow walking the heap until we have
+ * transferred control to the user task so we watch the
+ * system state.
+ */
+
+ if ( !_System_state_Is_up( _System_state_Get() ) )
+ return;
+
+ the_block = the_heap->start;
+
+ if (do_dump == TRUE) {
+ printf("\nPASS: %d start @ 0x%p final 0x%p, first 0x%p last 0x%p\n",
+ source, the_heap->start, the_heap->final,
+ the_heap->first, the_heap->last
+ );
+ }
+
+ /*
+ * Handle the 1st block
+ */
+
+ if (the_block->back_flag != HEAP_DUMMY_FLAG) {
+ printf("PASS: %d Back flag of 1st block isn't HEAP_DUMMY_FLAG\n", source);
+ error = 1;
+ }
+
+ while (notdone) {
+ passes++;
+ if (error && (passes > 10))
+ abort();
+
+ if (do_dump == TRUE) {
+ printf("PASS: %d Block @ 0x%p Back %d, Front %d",
+ source, the_block,
+ the_block->back_flag, the_block->front_flag);
+ if ( _Heap_Is_block_free(the_block) ) {
+ printf( " Prev 0x%p, Next 0x%p\n",
+ the_block->previous, the_block->next);
+ } else {
+ printf("\n");
+ }
+ }
+
+ /*
+ * Handle the last block
+ */
+
+ if ( the_block->front_flag != HEAP_DUMMY_FLAG ) {
+ next_block = _Heap_Next_block(the_block);
+ if ( the_block->front_flag != next_block->back_flag ) {
+ error = 1;
+ printf("PASS: %d Front and back flags don't match\n", source);
+ printf(" Current Block: Back - %d, Front - %d",
+ the_block->back_flag, the_block->front_flag);
+ if (do_dump == TRUE) {
+ if (_Heap_Is_block_free(the_block)) {
+ printf(" Prev 0x%p, Next 0x%p\n",
+ the_block->previous, the_block->next);
+ } else {
+ printf("\n");
+ }
+ } else {
+ printf("\n");
+ }
+ printf(" Next Block: Back - %d, Front - %d",
+ next_block->back_flag, next_block->front_flag);
+ if (do_dump == TRUE) {
+ if (_Heap_Is_block_free(next_block)) {
+ printf(" Prev 0x%p, Next 0x%p\n",
+ the_block->previous, the_block->next);
+ } else {
+ printf("\n");
+ }
+ } else {
+ printf("\n");
+ }
+ }
+ }
+
+ if (the_block->front_flag == HEAP_DUMMY_FLAG)
+ notdone = 0;
+ else
+ the_block = next_block;
+ }
+
+ if (error)
+ abort();
+}
+#endif
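Usage sketch (not part of the patch): initialize a heap over a static area, allocate from it, and free. The page size of 16 is an assumption that must satisfy _Heap_Is_page_size_valid(); the names and area size are hypothetical.

#include <rtems/score/heap.h>

static Heap_Control my_heap;
static unsigned32   my_heap_area[ 2048 ];    /* 8 KB backing store, size is arbitrary */

void my_heap_example( void )
{
  void       *p;
  unsigned32  available;
  unsigned32  block_size;

  /* returns 0 if the area is too small or the page size is rejected */
  available = _Heap_Initialize( &my_heap, my_heap_area, sizeof( my_heap_area ), 16 );
  if ( available == 0 )
    return;

  p = _Heap_Allocate( &my_heap, 128 );
  if ( p ) {
    (void) _Heap_Size_of_user_area( &my_heap, p, &block_size );
    (void) _Heap_Free( &my_heap, p );
  }
}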
diff --git a/c/src/exec/score/src/interr.c b/c/src/exec/score/src/interr.c
new file mode 100644
index 0000000000..04a77fbe37
--- /dev/null
+++ b/c/src/exec/score/src/interr.c
@@ -0,0 +1,61 @@
+/*
+ * Internal Error Handler
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/interr.h>
+#include <rtems/score/sysstate.h>
+#include <rtems/score/userext.h>
+
+/*PAGE
+ *
+ * _Internal_error_Occurred
+ *
+ * This routine will invoke the fatal error handler supplied by the user
+ * followed by the default one provided by the executive. The default
+ * error handler assumes no hardware is present to help inform the user
+ * of the problem. Halt stores the error code in a known register,
+ * disables interrupts, and halts the CPU. If the CPU does not have a
+ * halt instruction, it will loop to itself.
+ *
+ * Input parameters:
+ * the_source - what subsystem the error originated in
+ * is_internal - if the error was internally generated
+ * the_error - fatal error status code
+ *
+ * Output parameters:
+ * As much information as possible is stored in a CPU dependent fashion.
+ * See the CPU dependent code for more information.
+ *
+ * NOTE: The the_error is not necessarily a directive status code.
+ */
+
+void volatile _Internal_error_Occurred(
+ Internal_errors_Source the_source,
+ boolean is_internal,
+ unsigned32 the_error
+)
+{
+
+ Internal_errors_What_happened.the_source = the_source;
+ Internal_errors_What_happened.is_internal = is_internal;
+ Internal_errors_What_happened.the_error = the_error;
+
+ _User_extensions_Fatal( the_source, is_internal, the_error );
+
+ _System_state_Set( SYSTEM_STATE_FAILED );
+
+ _CPU_Fatal_halt( the_error );
+
+ /* will not return from this routine */
+}
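Usage sketch (not part of the patch): how a subsystem might raise a fatal error. INTERNAL_ERROR_CORE appears elsewhere in this patch; MY_ERROR_NO_TABLE is a hypothetical error code invented for this sketch.

#include <rtems/score/interr.h>

#define MY_ERROR_NO_TABLE  0x01    /* hypothetical subsystem error code */

void my_fatal_check( void *required_table )
{
  if ( required_table == NULL )
    _Internal_error_Occurred(
      INTERNAL_ERROR_CORE,
      TRUE,                        /* internally detected */
      MY_ERROR_NO_TABLE
    );
  /* _Internal_error_Occurred does not return */
}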
diff --git a/c/src/exec/score/src/isr.c b/c/src/exec/score/src/isr.c
new file mode 100644
index 0000000000..afc4cdcf38
--- /dev/null
+++ b/c/src/exec/score/src/isr.c
@@ -0,0 +1,60 @@
+/*
+ * ISR Handler
+ *
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/isr.h>
+#include <rtems/score/stack.h>
+#include <rtems/score/interr.h>
+#include <rtems/score/wkspace.h>
+
+/* _ISR_Handler_initialization
+ *
+ * This routine initializes the ISR handler.
+ *
+ * Input parameters: NONE
+ *
+ * Output parameters: NONE
+ */
+
+void _ISR_Handler_initialization( void )
+{
+ _ISR_Signals_to_thread_executing = FALSE;
+
+ _ISR_Nest_level = 0;
+
+#if ( CPU_ALLOCATE_INTERRUPT_STACK == TRUE )
+
+ if ( _CPU_Table.interrupt_stack_size < STACK_MINIMUM_SIZE )
+ _Internal_error_Occurred(
+ INTERNAL_ERROR_CORE,
+ TRUE,
+ INTERNAL_ERROR_INTERRUPT_STACK_TOO_SMALL
+ );
+
+ _CPU_Interrupt_stack_low =
+ _Workspace_Allocate_or_fatal_error( _CPU_Table.interrupt_stack_size );
+
+ _CPU_Interrupt_stack_high = _Addresses_Add_offset(
+ _CPU_Interrupt_stack_low,
+ _CPU_Table.interrupt_stack_size
+ );
+
+#endif
+
+#if ( CPU_HAS_HARDWARE_INTERRUPT_STACK == TRUE )
+ _CPU_Install_interrupt_stack();
+#endif
+
+}
diff --git a/c/src/exec/score/src/mpci.c b/c/src/exec/score/src/mpci.c
new file mode 100644
index 0000000000..c3090aacc1
--- /dev/null
+++ b/c/src/exec/score/src/mpci.c
@@ -0,0 +1,524 @@
+/*
+ * Multiprocessing Communications Interface (MPCI) Handler
+ *
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/cpu.h>
+#include <rtems/score/interr.h>
+#include <rtems/score/mpci.h>
+#include <rtems/score/mppkt.h>
+#include <rtems/score/states.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/threadq.h>
+#include <rtems/score/tqdata.h>
+#include <rtems/score/watchdog.h>
+#include <rtems/score/sysstate.h>
+
+#include <rtems/score/coresem.h>
+
+/*PAGE
+ *
+ * _MPCI_Handler_initialization
+ *
+ * This subprogram performs the initialization necessary for this handler.
+ */
+
+void _MPCI_Handler_initialization(
+ MPCI_Control *users_mpci_table,
+ unsigned32 timeout_status
+)
+{
+ CORE_semaphore_Attributes attributes;
+
+ if ( _System_state_Is_multiprocessing && !users_mpci_table )
+ _Internal_error_Occurred(
+ INTERNAL_ERROR_CORE,
+ TRUE,
+ INTERNAL_ERROR_NO_MPCI
+ );
+
+ _MPCI_table = users_mpci_table;
+
+ if ( !_System_state_Is_multiprocessing )
+ return;
+
+ /*
+ * Register the MP Process Packet routine.
+ */
+
+ _MPCI_Register_packet_processor(
+ MP_PACKET_MPCI_INTERNAL,
+ _MPCI_Internal_packets_Process_packet
+ );
+
+ /*
+ * Create the counting semaphore used by the MPCI Receive Server.
+ */
+
+ attributes.discipline = CORE_SEMAPHORE_DISCIPLINES_FIFO;
+
+ _CORE_semaphore_Initialize(
+ &_MPCI_Semaphore,
+ OBJECTS_NO_CLASS, /* free floating semaphore */
+ &attributes, /* the_semaphore_attributes */
+ 0, /* initial_value */
+ NULL /* proxy_extract_callout */
+ );
+
+ _Thread_queue_Initialize(
+ &_MPCI_Remote_blocked_threads,
+ OBJECTS_NO_CLASS,
+ THREAD_QUEUE_DISCIPLINE_FIFO,
+ STATES_WAITING_FOR_RPC_REPLY,
+ NULL,
+ timeout_status
+ );
+}
+
+/*PAGE
+ *
+ * _MPCI_Create_server
+ *
+ * This subprogram creates the MPCI receive server.
+ */
+
+char *_MPCI_Internal_name = "MPCI";
+
+void _MPCI_Create_server( void )
+{
+
+ if ( !_System_state_Is_multiprocessing )
+ return;
+
+ /*
+ * Initialize the MPCI Receive Server
+ */
+
+ _MPCI_Receive_server_tcb = _Thread_Internal_allocate();
+
+ _Thread_Initialize(
+ &_Thread_Internal_information,
+ _MPCI_Receive_server_tcb,
+ NULL, /* allocate the stack */
+ MPCI_RECEIVE_SERVER_STACK_SIZE,
+ CPU_ALL_TASKS_ARE_FP,
+ PRIORITY_MINIMUM,
+ FALSE, /* no preempt */
+ FALSE, /* not timesliced */
+ 0, /* all interrupts enabled */
+ _MPCI_Internal_name
+ );
+
+ _Thread_Start(
+ _MPCI_Receive_server_tcb,
+ THREAD_START_NUMERIC,
+ _MPCI_Receive_server,
+ NULL,
+ 0
+ );
+}
+
+/*PAGE
+ *
+ * _MPCI_Initialization
+ *
+ * This subprogram initializes the MPCI driver by
+ * invoking the user provided MPCI initialization callout.
+ */
+
+void _MPCI_Initialization ( void )
+{
+ (*_MPCI_table->initialization)();
+}
+
+/*PAGE
+ *
+ * _MPCI_Register_packet_processor
+ *
+ * This routine registers the MPCI packet processor for the
+ * designated object class.
+ */
+
+void _MPCI_Register_packet_processor(
+ Objects_Classes the_class,
+ MPCI_Packet_processor the_packet_processor
+
+)
+{
+ _MPCI_Packet_processors[ the_class ] = the_packet_processor;
+}
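For illustration, a manager that defines its own packet class would register its processor during initialization.  The class value and processor routine below are assumptions for the sketch, not part of the MPCI handler:

void _Hypothetical_Process_packet( MP_packet_Prefix *the_packet );

void _Hypothetical_MP_initialization( void )
{
  /* Route incoming packets of this class to the manager's processor. */
  _MPCI_Register_packet_processor(
    MP_PACKET_HYPOTHETICAL,          /* assumed packet class value */
    _Hypothetical_Process_packet
  );
}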
+
+/*PAGE
+ *
+ * _MPCI_Get_packet
+ *
+ * This subprogram obtains a packet by invoking the user provided
+ * MPCI get packet callout.
+ */
+
+MP_packet_Prefix *_MPCI_Get_packet ( void )
+{
+ MP_packet_Prefix *the_packet;
+
+ (*_MPCI_table->get_packet)( &the_packet );
+
+ if ( the_packet == NULL )
+ _Internal_error_Occurred(
+ INTERNAL_ERROR_CORE,
+ TRUE,
+ INTERNAL_ERROR_OUT_OF_PACKETS
+ );
+
+ /*
+ * Put in a default timeout that will be used for
+ * all packets that do not otherwise have a timeout.
+ */
+
+ the_packet->timeout = MPCI_DEFAULT_TIMEOUT;
+
+ return the_packet;
+}
+
+/*PAGE
+ *
+ * _MPCI_Return_packet
+ *
+ * This subprogram returns a packet by invoking the user provided
+ * MPCI return packet callout.
+ */
+
+void _MPCI_Return_packet (
+ MP_packet_Prefix *the_packet
+)
+{
+ (*_MPCI_table->return_packet)( the_packet );
+}
+
+/*PAGE
+ *
+ * _MPCI_Send_process_packet
+ *
+ * This subprogram sends a process packet by invoking the user provided
+ * MPCI send callout.
+ */
+
+void _MPCI_Send_process_packet (
+ unsigned32 destination,
+ MP_packet_Prefix *the_packet
+)
+{
+ the_packet->source_tid = _Thread_Executing->Object.id;
+ the_packet->to_convert =
+ ( the_packet->to_convert - sizeof(MP_packet_Prefix) ) /
+ sizeof(unsigned32);
+
+ (*_MPCI_table->send_packet)( destination, the_packet );
+}
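A minimal sketch of the calling pattern a manager might use to announce an event to every node, mirroring _MPCI_Internal_packets_Send_process_packet later in this file.  The packet class is an assumption; the prefix fields are filled in the same way the internal packets routine fills them:

void _Hypothetical_Send_announcement( void )
{
  MP_packet_Prefix *the_packet;

  the_packet = _MPCI_Get_packet();    /* fatal error if no packet is free */

  the_packet->the_class  = MP_PACKET_HYPOTHETICAL;      /* assumed class */
  the_packet->length     = sizeof( MP_packet_Prefix );
  the_packet->to_convert = sizeof( MP_packet_Prefix );

  _MPCI_Send_process_packet( MPCI_ALL_NODES, the_packet );
}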
+
+/*PAGE
+ *
+ * _MPCI_Send_request_packet
+ *
+ * This subprogram sends a request packet by invoking the user provided
+ * MPCI send callout.
+ */
+
+unsigned32 _MPCI_Send_request_packet (
+ unsigned32 destination,
+ MP_packet_Prefix *the_packet,
+ States_Control extra_state
+)
+{
+ the_packet->source_tid = _Thread_Executing->Object.id;
+ the_packet->source_priority = _Thread_Executing->current_priority;
+ the_packet->to_convert =
+ ( the_packet->to_convert - sizeof(MP_packet_Prefix) ) /
+ sizeof(unsigned32);
+
+ _Thread_Executing->Wait.id = the_packet->id;
+
+ _Thread_Executing->Wait.queue = &_MPCI_Remote_blocked_threads;
+
+ _Thread_Disable_dispatch();
+
+ (*_MPCI_table->send_packet)( destination, the_packet );
+
+ _Thread_queue_Enter_critical_section( &_MPCI_Remote_blocked_threads );
+
+ /*
+ * See if we need a default timeout
+ */
+
+ if (the_packet->timeout == MPCI_DEFAULT_TIMEOUT)
+ the_packet->timeout = _MPCI_table->default_timeout;
+
+ _Thread_queue_Enqueue( &_MPCI_Remote_blocked_threads, the_packet->timeout );
+
+ _Thread_Executing->current_state =
+ _States_Set( extra_state, _Thread_Executing->current_state );
+
+ _Thread_Enable_dispatch();
+
+ return _Thread_Executing->Wait.return_code;
+}
+
+/*PAGE
+ *
+ * _MPCI_Send_response_packet
+ *
+ * This subprogram sends a response packet by invoking the user provided
+ * MPCI send callout.
+ */
+
+void _MPCI_Send_response_packet (
+ unsigned32 destination,
+ MP_packet_Prefix *the_packet
+)
+{
+ the_packet->source_tid = _Thread_Executing->Object.id;
+
+ (*_MPCI_table->send_packet)( destination, the_packet );
+}
+
+/*PAGE
+ *
+ * _MPCI_Receive_packet
+ *
+ * This subprogram receives a packet by invoking the user provided
+ * MPCI receive callout.
+ */
+
+MP_packet_Prefix *_MPCI_Receive_packet ( void )
+{
+ MP_packet_Prefix *the_packet;
+
+ (*_MPCI_table->receive_packet)( &the_packet );
+
+ return the_packet;
+}
+
+/*PAGE
+ *
+ * _MPCI_Process_response
+ *
+ * This subprogram processes a response packet by locating the thread
+ * which is blocked waiting for the response and extracting it from
+ * the remote blocked threads queue.
+ */
+
+Thread_Control *_MPCI_Process_response (
+ MP_packet_Prefix *the_packet
+)
+{
+ Thread_Control *the_thread;
+ Objects_Locations location;
+
+ the_thread = _Thread_Get( the_packet->id, &location );
+ switch ( location ) {
+ case OBJECTS_ERROR:
+ case OBJECTS_REMOTE:
+ the_thread = NULL; /* IMPOSSIBLE */
+ break;
+ case OBJECTS_LOCAL:
+ _Thread_queue_Extract( &_MPCI_Remote_blocked_threads, the_thread );
+ the_thread->Wait.return_code = the_packet->return_code;
+ _Thread_Unnest_dispatch();
+ break;
+ }
+
+ return the_thread;
+}
+
+/*PAGE
+ *
+ * _MPCI_Receive_server
+ *
+ */
+
+Thread _MPCI_Receive_server(
+ unsigned32 ignored
+)
+{
+
+ MP_packet_Prefix *the_packet;
+ MPCI_Packet_processor the_function;
+ Thread_Control *executing;
+
+ executing = _Thread_Executing;
+
+ for ( ; ; ) {
+
+ executing->receive_packet = NULL;
+
+ _Thread_Disable_dispatch();
+ _CORE_semaphore_Seize( &_MPCI_Semaphore, 0, TRUE, WATCHDOG_NO_TIMEOUT );
+ _Thread_Enable_dispatch();
+
+ for ( ; ; ) {
+ the_packet = _MPCI_Receive_packet();
+
+ if ( !the_packet )
+ break;
+
+ executing->receive_packet = the_packet;
+
+ if ( !_Mp_packet_Is_valid_packet_class ( the_packet->the_class ) )
+ break;
+
+ the_function = _MPCI_Packet_processors[ the_packet->the_class ];
+
+ if ( !the_function )
+ _Internal_error_Occurred(
+ INTERNAL_ERROR_CORE,
+ TRUE,
+ INTERNAL_ERROR_BAD_PACKET
+ );
+
+ (*the_function)( the_packet );
+ }
+ }
+}
+
+/*PAGE
+ *
+ * _MPCI_Announce
+ *
+ */
+
+void _MPCI_Announce ( void )
+{
+ _Thread_Disable_dispatch();
+ (void) _CORE_semaphore_Surrender( &_MPCI_Semaphore, 0, 0 );
+ _Thread_Enable_dispatch();
+}
+
+/*PAGE
+ *
+ * _MPCI_Internal_packets_Send_process_packet
+ *
+ */
+
+void _MPCI_Internal_packets_Send_process_packet (
+ MPCI_Internal_Remote_operations operation
+)
+{
+ MPCI_Internal_packet *the_packet;
+
+ switch ( operation ) {
+
+ case MPCI_PACKETS_SYSTEM_VERIFY:
+
+ the_packet = _MPCI_Internal_packets_Get_packet();
+ the_packet->Prefix.the_class = MP_PACKET_MPCI_INTERNAL;
+ the_packet->Prefix.length = sizeof ( MPCI_Internal_packet );
+ the_packet->Prefix.to_convert = sizeof ( MPCI_Internal_packet );
+ the_packet->operation = operation;
+
+ the_packet->maximum_nodes = _Objects_Maximum_nodes;
+
+ the_packet->maximum_global_objects = _Objects_MP_Maximum_global_objects;
+
+ _MPCI_Send_process_packet( MPCI_ALL_NODES, &the_packet->Prefix );
+ break;
+ }
+}
+
+/*PAGE
+ *
+ * _MPCI_Internal_packets_Send_request_packet
+ *
+ * This subprogram is not needed since there are no request
+ * packets to be sent by this manager.
+ *
+ */
+
+/*PAGE
+ *
+ * _MPCI_Internal_packets_Send_response_packet
+ *
+ * This subprogram is not needed since there are no response
+ * packets to be sent by this manager.
+ *
+ */
+
+/*PAGE
+ *
+ * _MPCI_Internal_packets_Process_packet
+ *
+ */
+
+void _MPCI_Internal_packets_Process_packet (
+ MP_packet_Prefix *the_packet_prefix
+)
+{
+ MPCI_Internal_packet *the_packet;
+ unsigned32 maximum_nodes;
+ unsigned32 maximum_global_objects;
+
+ the_packet = (MPCI_Internal_packet *) the_packet_prefix;
+
+ switch ( the_packet->operation ) {
+
+ case MPCI_PACKETS_SYSTEM_VERIFY:
+
+ maximum_nodes = the_packet->maximum_nodes;
+ maximum_global_objects = the_packet->maximum_global_objects;
+ if ( maximum_nodes != _Objects_Maximum_nodes ||
+ maximum_global_objects != _Objects_MP_Maximum_global_objects ) {
+
+ _MPCI_Return_packet( the_packet_prefix );
+
+ _Internal_error_Occurred(
+ INTERNAL_ERROR_CORE,
+ TRUE,
+ INTERNAL_ERROR_INCONSISTENT_MP_INFORMATION
+ );
+ }
+
+ _MPCI_Return_packet( the_packet_prefix );
+
+ break;
+ }
+}
+
+/*PAGE
+ *
+ * _MPCI_Internal_packets_Send_object_was_deleted
+ *
+ * This subprogram is not needed since there are no objects
+ * deleted by this manager.
+ *
+ */
+
+/*PAGE
+ *
+ * _MPCI_Internal_packets_Send_extract_proxy
+ *
+ * This subprogram is not needed since there are no proxies
+ * to be extracted by this manager.
+ *
+ */
+
+/*PAGE
+ *
+ * _MPCI_Internal_packets_Get_packet
+ *
+ */
+
+MPCI_Internal_packet *_MPCI_Internal_packets_Get_packet ( void )
+{
+ return ( (MPCI_Internal_packet *) _MPCI_Get_packet() );
+}
+
+/* end of file */
diff --git a/c/src/exec/score/src/object.c b/c/src/exec/score/src/object.c
new file mode 100644
index 0000000000..4f672086fe
--- /dev/null
+++ b/c/src/exec/score/src/object.c
@@ -0,0 +1,512 @@
+/*
+ * Object Handler
+ *
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/chain.h>
+#include <rtems/score/object.h>
+#include <rtems/score/objectmp.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/wkspace.h>
+#include <rtems/score/sysstate.h>
+
+/*PAGE
+ *
+ * _Objects_Handler_initialization
+ *
+ * This routine initializes the object handler.
+ *
+ * Input parameters:
+ * node - local node
+ * maximum_nodes - number of nodes in the system
+ * maximum_global_objects - number of configured global objects
+ *
+ * Output parameters: NONE
+ */
+
+void _Objects_Handler_initialization(
+ unsigned32 node,
+ unsigned32 maximum_nodes,
+ unsigned32 maximum_global_objects
+)
+{
+ if ( node < 1 || node > maximum_nodes )
+ _Internal_error_Occurred(
+ INTERNAL_ERROR_CORE,
+ TRUE,
+ INTERNAL_ERROR_INVALID_NODE
+ );
+
+ _Objects_Local_node = node;
+ _Objects_Maximum_nodes = maximum_nodes;
+
+ _Objects_MP_Handler_initialization(
+ node,
+ maximum_nodes,
+ maximum_global_objects
+ );
+}
+
+/*PAGE
+ *
+ * _Objects_Initialize_information
+ *
+ * This routine initializes all object information related data structures.
+ *
+ * Input parameters:
+ *   information         - object information table
+ *   the_class           - object class
+ *   supports_global     - TRUE if this is a global object class
+ *   maximum             - maximum objects of this class
+ *   size                - size of this object's control block
+ *   is_string           - TRUE if names for this object are strings
+ *   maximum_name_length - maximum length of an object name
+ *   is_thread           - TRUE if this class of objects are threads
+ *
+ * Output parameters: NONE
+ */
+
+void _Objects_Initialize_information(
+ Objects_Information *information,
+ Objects_Classes the_class,
+ boolean supports_global,
+ unsigned32 maximum,
+ unsigned32 size,
+ boolean is_string,
+ unsigned32 maximum_name_length,
+ boolean is_thread
+)
+{
+ unsigned32 minimum_index;
+ unsigned32 index;
+ Objects_Control *the_object;
+ unsigned32 name_length;
+ void *name_area;
+
+ information->maximum = maximum;
+ information->the_class = the_class;
+ information->is_string = is_string;
+ information->is_thread = is_thread;
+
+ /*
+ * Set the entry in the object information table.
+ */
+
+ _Objects_Information_table[ the_class ] = information;
+
+ /*
+ * Calculate minimum and maximum Id's
+ */
+
+ if ( maximum == 0 ) minimum_index = 0;
+ else minimum_index = 1;
+
+ information->minimum_id =
+ _Objects_Build_id( the_class, _Objects_Local_node, minimum_index );
+
+ information->maximum_id =
+ _Objects_Build_id( the_class, _Objects_Local_node, maximum );
+
+ /*
+ * Allocate local pointer table
+ */
+
+ information->local_table = _Workspace_Allocate_or_fatal_error(
+ (maximum + 1) * sizeof(Objects_Control *)
+ );
+
+ /*
+ * Allocate name table
+ */
+
+ name_length = maximum_name_length;
+
+ if (name_length & (OBJECTS_NAME_ALIGNMENT-1))
+ name_length = (name_length + OBJECTS_NAME_ALIGNMENT) &
+ ~(OBJECTS_NAME_ALIGNMENT-1);
+
+ information->name_length = name_length;
+
+ name_area = _Workspace_Allocate_or_fatal_error( (maximum + 1) * name_length );
+ information->name_table = name_area;
+
+ /*
+ * Initialize local pointer table
+ */
+
+ for ( index=0 ; index <= maximum ; index++ ) {
+ information->local_table[ index ] = NULL;
+ }
+
+ /*
+ * Initialize objects .. if there are any
+ */
+
+ if ( maximum == 0 ) {
+ _Chain_Initialize_empty( &information->Inactive );
+ } else {
+
+ _Chain_Initialize(
+ &information->Inactive,
+ _Workspace_Allocate_or_fatal_error( maximum * size ),
+ maximum,
+ size
+ );
+
+ the_object = (Objects_Control *) information->Inactive.first;
+ for ( index=1; index <= maximum ; index++ ) {
+ the_object->id =
+ _Objects_Build_id( the_class, _Objects_Local_node, index );
+
+ the_object->name = (void *) name_area;
+
+ name_area = _Addresses_Add_offset( name_area, name_length );
+
+ the_object = (Objects_Control *) the_object->Node.next;
+ }
+
+ }
+
+ /*
+ * Take care of multiprocessing
+ */
+
+ if ( supports_global == TRUE && _System_state_Is_multiprocessing ) {
+
+ information->global_table = _Workspace_Allocate_or_fatal_error(
+ (_Objects_Maximum_nodes + 1) * sizeof(Chain_Control)
+ );
+
+ for ( index=1; index <= _Objects_Maximum_nodes ; index++ )
+ _Chain_Initialize_empty( &information->global_table[ index ] );
+ }
+ else
+ information->global_table = NULL;
+}
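A sketch of how a manager might set up its own object class during initialization, following the same pattern used for the internal threads class in thread.c later in this changeset.  The class value, control block type, and limits are illustrative assumptions:

typedef struct {
  Objects_Control  Object;    /* must be first, as in every managed object */
  /* ... manager specific fields ... */
}  Hypothetical_Control;

Objects_Information _Hypothetical_Information;

void _Hypothetical_Manager_initialization( unsigned32 maximum_widgets )
{
  _Objects_Initialize_information(
    &_Hypothetical_Information,
    OBJECTS_HYPOTHETICAL_CLASS,       /* assumed class value            */
    TRUE,                             /* class supports global objects  */
    maximum_widgets,                  /* maximum objects of this class  */
    sizeof( Hypothetical_Control ),   /* size of each control block     */
    FALSE,                            /* names are raw 32 bit values    */
    4,                                /* maximum name length in bytes   */
    FALSE                             /* this is not a thread class     */
  );
}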
+
+/*PAGE
+ *
+ * _Objects_Clear_name
+ *
+ * XXX
+ */
+
+void _Objects_Clear_name(
+ void *name,
+ unsigned32 length
+)
+{
+ unsigned32 index;
+ unsigned32 maximum = length / OBJECTS_NAME_ALIGNMENT;
+ unsigned32 *name_ptr = name;
+
+ for ( index=0 ; index < maximum ; index++ )
+ *name_ptr++ = 0;
+}
+
+/*PAGE
+ *
+ * _Objects_Copy_name_string
+ *
+ * XXX
+ */
+
+void _Objects_Copy_name_string(
+ void *source,
+ void *destination
+)
+{
+ unsigned8 *source_p = source;
+ unsigned8 *destination_p = destination;
+
+ do {
+ *destination_p++ = *source_p;
+ } while ( *source_p++ );
+}
+
+/*PAGE
+ *
+ * _Objects_Copy_name_raw
+ *
+ * XXX
+ */
+
+void _Objects_Copy_name_raw(
+ void *source,
+ void *destination,
+ unsigned32 length
+)
+{
+ unsigned32 *source_p = source;
+ unsigned32 *destination_p = destination;
+ unsigned32 tmp_length = length / OBJECTS_NAME_ALIGNMENT;
+
+ while ( tmp_length-- )
+ *destination_p++ = *source_p++;
+}
+
+/*PAGE
+ *
+ * _Objects_Compare_name_string
+ *
+ * XXX
+ */
+
+boolean _Objects_Compare_name_string(
+ void *name_1,
+ void *name_2,
+ unsigned32 length
+)
+{
+ unsigned8 *name_1_p = name_1;
+ unsigned8 *name_2_p = name_2;
+ unsigned32 tmp_length = length;
+
+ do {
+ if ( *name_1_p++ != *name_2_p++ )
+ return FALSE;
+ if ( !tmp_length-- )
+ return FALSE;
+ } while ( *name_1_p );
+
+ return TRUE;
+}
+
+/*PAGE
+ *
+ * _Objects_Compare_name_raw
+ *
+ * XXX
+ */
+
+boolean _Objects_Compare_name_raw(
+ void *name_1,
+ void *name_2,
+ unsigned32 length
+)
+{
+ unsigned32 *name_1_p = name_1;
+ unsigned32 *name_2_p = name_2;
+ unsigned32 tmp_length = length / OBJECTS_NAME_ALIGNMENT;
+
+ while ( tmp_length-- )
+ if ( *name_1_p++ != *name_2_p++ )
+ return FALSE;
+
+ return TRUE;
+}
+
+
+/*PAGE
+ *
+ * _Objects_Name_to_id
+ *
+ * This kernel routine searches the object table(s) for the given
+ * object name and returns the associated object id.
+ *
+ * Input parameters:
+ * information - object information
+ * name - user defined object name
+ *   node - node identifier (0 indicates any node)
+ * id - address of return ID
+ *
+ * Output parameters:
+ * id - object id
+ * OBJECTS_SUCCESSFUL - if successful
+ * error code - if unsuccessful
+ */
+
+Objects_Name_to_id_errors _Objects_Name_to_id(
+ Objects_Information *information,
+ Objects_Name name,
+ unsigned32 node,
+ Objects_Id *id
+)
+{
+ boolean search_local_node;
+ Objects_Control **objects;
+ Objects_Control *the_object;
+ unsigned32 index;
+ unsigned32 name_length;
+ Objects_Name_comparators compare_them;
+
+ if ( name == 0 )
+ return OBJECTS_INVALID_NAME;
+
+ search_local_node = FALSE;
+
+ if ( information->maximum != 0 &&
+ (node == OBJECTS_SEARCH_ALL_NODES || node == OBJECTS_SEARCH_LOCAL_NODE ||
+ _Objects_Is_local_node( node ) ) )
+ search_local_node = TRUE;
+
+ if ( search_local_node ) {
+ objects = information->local_table;
+
+ name_length = information->name_length;
+
+ if ( information->is_string ) compare_them = _Objects_Compare_name_string;
+ else compare_them = _Objects_Compare_name_raw;
+
+ for ( index = 1; index <= information->maximum; index++ ) {
+
+ the_object = objects[ index ];
+
+ if ( !the_object || !the_object->name )
+ continue;
+
+ if ( (*compare_them)( name, the_object->name, name_length ) ) {
+ *id = the_object->id;
+ return OBJECTS_SUCCESSFUL;
+ }
+ }
+ }
+
+ if ( _Objects_Is_local_node( node ) || node == OBJECTS_SEARCH_LOCAL_NODE )
+ return OBJECTS_INVALID_NAME;
+
+ return ( _Objects_MP_Global_name_search( information, name, node, id ) );
+}
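A brief usage sketch; the information table and name are assumed to be supplied by the calling manager:

boolean _Hypothetical_Name_lookup(
  Objects_Information *information,
  Objects_Name         name,
  Objects_Id          *id
)
{
  /* Search the local node first, then (if multiprocessing) remote nodes. */
  return _Objects_Name_to_id( information, name, OBJECTS_SEARCH_ALL_NODES, id )
           == OBJECTS_SUCCESSFUL;
}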
+
+/*PAGE
+ *
+ * _Objects_Get
+ *
+ * This routine sets the object pointer for the given
+ * object id based on the given object information structure.
+ *
+ * Input parameters:
+ * information - pointer to entry in table for this class
+ * id - object id to search for
+ * location - address of where to store the location
+ *
+ * Output parameters:
+ * returns - address of object if local
+ * location - one of the following:
+ * OBJECTS_ERROR - invalid object ID
+ * OBJECTS_REMOTE - remote object
+ * OBJECTS_LOCAL - local object
+ */
+
+Objects_Control *_Objects_Get(
+ Objects_Information *information,
+ Objects_Id id,
+ Objects_Locations *location
+)
+{
+ Objects_Control *the_object;
+ unsigned32 index;
+
+ index = id - information->minimum_id;
+
+ if ( information->maximum >= index ) {
+ _Thread_Disable_dispatch();
+ if ( (the_object = information->local_table[index+1]) != NULL ) {
+ *location = OBJECTS_LOCAL;
+ return( the_object );
+ }
+ _Thread_Enable_dispatch();
+ *location = OBJECTS_ERROR;
+ return( NULL );
+ }
+ *location = OBJECTS_ERROR;
+ _Objects_MP_Is_remote( information, id, location, &the_object );
+ return the_object;
+}
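The calling pattern matters here:  when a local object is returned, dispatching has been left disabled by this routine and the caller must re-enable it on that path.  A sketch, with the information table an assumed name:

void _Hypothetical_Operation( Objects_Id id )
{
  Objects_Control   *the_object;
  Objects_Locations  location;

  the_object = _Objects_Get( &_Hypothetical_Information, id, &location );
  switch ( location ) {
    case OBJECTS_ERROR:
    case OBJECTS_REMOTE:
      break;                          /* no local object to operate on */
    case OBJECTS_LOCAL:
      /* ... operate on the_object while dispatching is disabled ... */
      _Thread_Enable_dispatch();
      break;
  }
}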
+
+
+/*PAGE
+ *
+ * _Objects_Get_next
+ *
+ * Like _Objects_Get, but treats the 'id' as a "hint" and
+ * finds the next valid object at or after that point.
+ * Mostly used for monitor and debug traversal of the objects of a class.
+ *
+ * Input parameters:
+ * information - pointer to entry in table for this class
+ * id - object id to search for
+ * location - address of where to store the location
+ * next_id - address to store next id to try
+ *
+ * Output parameters:
+ * returns - address of object if local
+ * location - one of the following:
+ * OBJECTS_ERROR - invalid object ID
+ * OBJECTS_REMOTE - remote object
+ * OBJECTS_LOCAL - local object
+ * next_id - will contain a reasonable "next" id to continue traversal
+ *
+ * NOTE:
+ *   assumes that adding '1' to an id yields the id with the next index.
+ */
+
+Objects_Control *
+_Objects_Get_next(
+ Objects_Information *information,
+ Objects_Id id,
+ unsigned32 *location_p,
+ Objects_Id *next_id_p
+)
+{
+ Objects_Control *object;
+ Objects_Id next_id;
+
+ if (_Objects_Get_index(id) == OBJECTS_ID_INITIAL_INDEX)
+ next_id = information->minimum_id;
+ else
+ next_id = id;
+
+ do {
+ /* walked off end of list? */
+ if (_Objects_Get_index(next_id) > information->maximum)
+ {
+ *location_p = OBJECTS_ERROR;
+ goto final;
+ }
+
+ /* try to grab one */
+ object = _Objects_Get(information, next_id, location_p);
+
+ next_id++;
+
+ } while (*location_p != OBJECTS_LOCAL);
+
+ *next_id_p = next_id;
+ return object;
+
+final:
+ *next_id_p = OBJECTS_ID_FINAL;
+ return 0;
+}
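A sketch of a monitor-style walk over every local object of a class.  It assumes an id whose index is OBJECTS_ID_INITIAL_INDEX may be used as the starting hint, and that the caller re-enables dispatching for each object returned (since each object was obtained via _Objects_Get):

void _Hypothetical_Walk( Objects_Information *information )
{
  Objects_Control *the_object;
  unsigned32       location;
  Objects_Id       id = OBJECTS_ID_INITIAL_INDEX;   /* assumed start hint */

  while ( (the_object =
             _Objects_Get_next( information, id, &location, &id )) != NULL ) {
    /* ... examine the_object, a local object ... */
    _Thread_Enable_dispatch();
  }
}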
+
+/*PAGE
+ *
+ * _Objects_Get_information
+ *
+ * XXX
+ */
+
+Objects_Information *_Objects_Get_information(
+ Objects_Id id
+)
+{
+ Objects_Classes the_class;
+
+ the_class = _Objects_Get_class( id );
+
+ if ( !_Objects_Is_class_valid( the_class ) )
+ return NULL;
+
+ return _Objects_Information_table[ the_class ];
+}
+
diff --git a/c/src/exec/score/src/objectmp.c b/c/src/exec/score/src/objectmp.c
new file mode 100644
index 0000000000..7546c33585
--- /dev/null
+++ b/c/src/exec/score/src/objectmp.c
@@ -0,0 +1,275 @@
+/*
+ * Multiprocessing Support for the Object Handler
+ *
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/interr.h>
+#include <rtems/score/object.h>
+#include <rtems/score/wkspace.h>
+#include <rtems/score/thread.h>
+
+/*PAGE
+ *
+ * _Objects_MP_Handler_initialization
+ *
+ */
+
+void _Objects_MP_Handler_initialization (
+ unsigned32 node,
+ unsigned32 maximum_nodes,
+ unsigned32 maximum_global_objects
+)
+{
+ _Objects_MP_Maximum_global_objects = maximum_global_objects;
+
+ if ( maximum_global_objects == 0 ) {
+ _Chain_Initialize_empty( &_Objects_MP_Inactive_global_objects );
+ return;
+ }
+
+ _Chain_Initialize(
+ &_Objects_MP_Inactive_global_objects,
+ _Workspace_Allocate_or_fatal_error(
+ maximum_global_objects * sizeof( Objects_MP_Control )
+ ),
+ maximum_global_objects,
+ sizeof( Objects_MP_Control )
+ );
+
+}
+
+/*PAGE
+ *
+ * _Objects_MP_Open
+ *
+ */
+
+void _Objects_MP_Open (
+ Objects_Information *information,
+ Objects_MP_Control *the_global_object,
+ unsigned32 the_name, /* XXX -- wrong for variable */
+ Objects_Id the_id
+)
+{
+ the_global_object->Object.id = the_id;
+ the_global_object->name = the_name;
+
+ _Chain_Prepend(
+ &information->global_table[ _Objects_Get_node( the_id ) ],
+ &the_global_object->Object.Node
+ );
+
+}
+
+/*PAGE
+ *
+ * _Objects_MP_Allocate_and_open
+ *
+ */
+
+boolean _Objects_MP_Allocate_and_open (
+ Objects_Information *information,
+ unsigned32 the_name, /* XXX -- wrong for variable */
+ Objects_Id the_id,
+ boolean is_fatal_error
+)
+{
+ Objects_MP_Control *the_global_object;
+
+ the_global_object = _Objects_MP_Allocate_global_object();
+ if ( _Objects_MP_Is_null_global_object( the_global_object ) ) {
+
+ if ( is_fatal_error == FALSE )
+ return FALSE;
+
+ _Internal_error_Occurred(
+ INTERNAL_ERROR_CORE,
+ TRUE,
+ INTERNAL_ERROR_OUT_OF_GLOBAL_OBJECTS
+ );
+
+ }
+
+ _Objects_MP_Open( information, the_global_object, the_name, the_id );
+
+ return TRUE;
+}
+
+/*PAGE
+ *
+ * _Objects_MP_Close
+ *
+ */
+
+void _Objects_MP_Close (
+ Objects_Information *information,
+ Objects_Id the_id
+)
+{
+ Chain_Control *the_chain;
+ Chain_Node *the_node;
+ Objects_MP_Control *the_object;
+
+ the_chain = &information->global_table[ _Objects_Get_node( the_id ) ];
+
+ for ( the_node = the_chain->first ;
+ !_Chain_Is_tail( the_chain, the_node ) ;
+ the_node = the_node->next ) {
+
+ the_object = (Objects_MP_Control *) the_node;
+
+ if ( _Objects_Are_ids_equal( the_object->Object.id, the_id ) ) {
+
+ _Chain_Extract( the_node );
+ _Objects_MP_Free_global_object( the_object );
+ return;
+ }
+
+ }
+
+ _Internal_error_Occurred(
+ INTERNAL_ERROR_CORE,
+ TRUE,
+ INTERNAL_ERROR_INVALID_GLOBAL_ID
+ );
+}
+
+/*PAGE
+ *
+ * _Objects_MP_Global_name_search
+ *
+ */
+
+Objects_Name_to_id_errors _Objects_MP_Global_name_search (
+ Objects_Information *information,
+ Objects_Name the_name,
+ unsigned32 nodes_to_search,
+ Objects_Id *the_id
+)
+{
+ unsigned32 low_node;
+ unsigned32 high_node;
+ unsigned32 node_index;
+ Chain_Control *the_chain;
+ Chain_Node *the_node;
+ Objects_MP_Control *the_object;
+ unsigned32 name_to_use = *(unsigned32 *)the_name; /* XXX variable */
+
+ if ( nodes_to_search > _Objects_Maximum_nodes )
+ return OBJECTS_INVALID_NODE;
+
+ if ( information->global_table == NULL )
+ return OBJECTS_INVALID_NAME;
+
+ if ( nodes_to_search == OBJECTS_SEARCH_ALL_NODES ||
+ nodes_to_search == OBJECTS_SEARCH_OTHER_NODES ) {
+ low_node = 1;
+ high_node = _Objects_Maximum_nodes;
+ } else {
+ low_node =
+ high_node = nodes_to_search;
+ }
+
+ _Thread_Disable_dispatch();
+
+ for ( node_index = low_node ; node_index <= high_node ; node_index++ ) {
+
+ /*
+     * NOTE: The local node was searched (if necessary) by
+ * _Objects_Name_to_id before this was invoked.
+ */
+
+ if ( !_Objects_Is_local_node( node_index ) ) {
+ the_chain = &information->global_table[ node_index ];
+
+ for ( the_node = the_chain->first ;
+ !_Chain_Is_tail( the_chain, the_node ) ;
+ the_node = the_node->next ) {
+
+ the_object = (Objects_MP_Control *) the_node;
+
+ if ( the_object->name == name_to_use ) {
+ *the_id = the_object->Object.id;
+ _Thread_Enable_dispatch();
+ return OBJECTS_SUCCESSFUL;
+ }
+ }
+ }
+ }
+
+ _Thread_Enable_dispatch();
+ return OBJECTS_INVALID_NAME;
+}
+
+/*PAGE
+ *
+ * _Objects_MP_Is_remote
+ *
+ */
+
+void _Objects_MP_Is_remote (
+ Objects_Information *information,
+ Objects_Id the_id,
+ Objects_Locations *location,
+ Objects_Control **the_object
+)
+{
+ unsigned32 node;
+ Chain_Control *the_chain;
+ Chain_Node *the_node;
+ Objects_MP_Control *the_global_object;
+
+ node = _Objects_Get_node( the_id );
+
+ /*
+   * NOTE: The local node was searched (if necessary) by
+ * _Objects_Name_to_id before this was invoked.
+ *
+ * The NODE field of an object id cannot be 0
+ * because 0 is an invalid node number.
+ */
+
+ if ( node == 0 ||
+ _Objects_Is_local_node( node ) ||
+ node > _Objects_Maximum_nodes ||
+ information->global_table == NULL ) {
+
+ *location = OBJECTS_ERROR;
+ *the_object = NULL;
+ return;
+ }
+
+ _Thread_Disable_dispatch();
+
+ the_chain = &information->global_table[ node ];
+
+ for ( the_node = the_chain->first ;
+ !_Chain_Is_tail( the_chain, the_node ) ;
+ the_node = the_node->next ) {
+
+ the_global_object = (Objects_MP_Control *) the_node;
+
+ if ( _Objects_Are_ids_equal( the_global_object->Object.id, the_id ) ) {
+ _Thread_Unnest_dispatch();
+ *location = OBJECTS_REMOTE;
+ *the_object = (Objects_Control *) the_global_object;
+ return;
+ }
+ }
+
+ _Thread_Enable_dispatch();
+ *location = OBJECTS_ERROR;
+ *the_object = NULL;
+
+}
diff --git a/c/src/exec/score/src/thread.c b/c/src/exec/score/src/thread.c
new file mode 100644
index 0000000000..37b465c518
--- /dev/null
+++ b/c/src/exec/score/src/thread.c
@@ -0,0 +1,1281 @@
+/*
+ * Thread Handler
+ *
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/apiext.h>
+#include <rtems/score/context.h>
+#include <rtems/score/interr.h>
+#include <rtems/score/isr.h>
+#include <rtems/score/object.h>
+#include <rtems/score/priority.h>
+#include <rtems/score/states.h>
+#include <rtems/score/sysstate.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/threadq.h>
+#include <rtems/score/userext.h>
+#include <rtems/score/wkspace.h>
+
+/*PAGE
+ *
+ * _Thread_Handler_initialization
+ *
+ * This routine initializes all thread manager related data structures.
+ *
+ * Input parameters:
+ *   ticks_per_timeslice - clock ticks per quantum
+ *   maximum_extensions  - maximum number of user extensions
+ *   maximum_proxies     - number of proxies to initialize
+ *
+ * Output parameters: NONE
+ */
+
+char *_Thread_Idle_name = "IDLE";
+
+void _Thread_Handler_initialization(
+ unsigned32 ticks_per_timeslice,
+ unsigned32 maximum_extensions,
+ unsigned32 maximum_proxies
+)
+{
+ unsigned32 index;
+
+ /*
+ * BOTH stacks hooks must be set or both must be NULL.
+ * Do not allow mixture.
+ */
+
+ if ( !( ( _CPU_Table.stack_allocate_hook == 0 )
+ == ( _CPU_Table.stack_free_hook == 0 ) ) )
+ _Internal_error_Occurred(
+ INTERNAL_ERROR_CORE,
+ TRUE,
+ INTERNAL_ERROR_BAD_STACK_HOOK
+ );
+
+ _Context_Switch_necessary = FALSE;
+ _Thread_Executing = NULL;
+ _Thread_Heir = NULL;
+ _Thread_Allocated_fp = NULL;
+
+ _Thread_Maximum_extensions = maximum_extensions;
+
+ _Thread_Ticks_remaining_in_timeslice = ticks_per_timeslice;
+ _Thread_Ticks_per_timeslice = ticks_per_timeslice;
+
+ _Thread_Ready_chain = _Workspace_Allocate_or_fatal_error(
+ (PRIORITY_MAXIMUM + 1) * sizeof(Chain_Control)
+ );
+
+ for ( index=0; index <= PRIORITY_MAXIMUM ; index++ )
+ _Chain_Initialize_empty( &_Thread_Ready_chain[ index ] );
+
+ _Thread_MP_Handler_initialization( maximum_proxies );
+
+ /*
+ * Initialize this class of objects.
+ */
+
+ _Objects_Initialize_information(
+ &_Thread_Internal_information,
+ OBJECTS_INTERNAL_THREADS,
+ FALSE,
+ ( _System_state_Is_multiprocessing ) ? 2 : 1,
+ sizeof( Thread_Control ),
+ TRUE,
+ 8,
+ TRUE
+ );
+
+}
+
+/*PAGE
+ *
+ * _Thread_Create_idle
+ */
+
+void _Thread_Create_idle( void )
+{
+ void *idle;
+
+ /*
+ * The entire workspace is zeroed during its initialization. Thus, all
+ * fields not explicitly assigned were explicitly zeroed by
+ * _Workspace_Initialization.
+ */
+
+ _Thread_Idle = _Thread_Internal_allocate();
+
+ /*
+ * Initialize the IDLE task.
+ */
+
+#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
+ idle = _CPU_Thread_Idle_body;
+#else
+ idle = _Thread_Idle_body;
+#endif
+
+ if ( _CPU_Table.idle_task )
+ idle = _CPU_Table.idle_task;
+
+ _Thread_Initialize(
+ &_Thread_Internal_information,
+ _Thread_Idle,
+ NULL, /* allocate the stack */
+ THREAD_IDLE_STACK_SIZE,
+ CPU_IDLE_TASK_IS_FP,
+ PRIORITY_MAXIMUM,
+ TRUE, /* preemptable */
+ FALSE, /* not timesliced */
+ 0, /* all interrupts enabled */
+ _Thread_Idle_name
+ );
+
+ /*
+ * WARNING!!! This is necessary to "kick" start the system and
+ * MUST be done before _Thread_Start is invoked.
+ */
+
+ _Thread_Heir =
+ _Thread_Executing = _Thread_Idle;
+
+ _Thread_Start(
+ _Thread_Idle,
+ THREAD_START_NUMERIC,
+ idle,
+ NULL,
+ 0
+ );
+
+}
+
+/*PAGE
+ *
+ * _Thread_Start_multitasking
+ *
+ * This kernel routine initiates multitasking.  It marks the system as
+ * up, makes the heir thread the executing thread, and performs a
+ * context switch from the BSP (initialization) context to the heir's
+ * context.
+ *
+ * Input parameters: NONE
+ *
+ * Output parameters: NONE
+ */
+
+void _Thread_Start_multitasking( void )
+{
+ /*
+ * The system is now multitasking and completely initialized.
+ * This system thread now either "goes away" in a single processor
+ * system or "turns into" the server thread in an MP system.
+ */
+
+ _System_state_Set( SYSTEM_STATE_UP );
+
+ _Context_Switch_necessary = FALSE;
+
+ _Thread_Executing = _Thread_Heir;
+
+ _Context_Switch( &_Thread_BSP_context, &_Thread_Executing->Registers );
+}
+
+/*PAGE
+ *
+ * _Thread_Dispatch
+ *
+ * This kernel routine determines if a dispatch is needed, and if so
+ * dispatches to the heir thread. Once the heir is running an attempt
+ * is made to dispatch any ASRs.
+ *
+ * ALTERNATE ENTRY POINTS:
+ * void _Thread_Enable_dispatch();
+ *
+ * Input parameters: NONE
+ *
+ * Output parameters: NONE
+ *
+ * INTERRUPT LATENCY:
+ * dispatch thread
+ * no dispatch thread
+ */
+
+#if ( CPU_INLINE_ENABLE_DISPATCH == FALSE )
+void _Thread_Enable_dispatch( void )
+{
+ if ( --_Thread_Dispatch_disable_level )
+ return;
+ _Thread_Dispatch();
+}
+#endif
+
+void _Thread_Dispatch( void )
+{
+ Thread_Control *executing;
+ Thread_Control *heir;
+ ISR_Level level;
+
+ executing = _Thread_Executing;
+ _ISR_Disable( level );
+ while ( _Context_Switch_necessary == TRUE ) {
+ heir = _Thread_Heir;
+ _Thread_Dispatch_disable_level = 1;
+ _Context_Switch_necessary = FALSE;
+ _Thread_Executing = heir;
+ _ISR_Enable( level );
+
+ _User_extensions_Thread_switch( executing, heir );
+
+ _Thread_Ticks_remaining_in_timeslice = _Thread_Ticks_per_timeslice;
+
+ /*
+ * If the CPU has hardware floating point, then we must address saving
+ * and restoring it as part of the context switch.
+ *
+ * The second conditional compilation section selects the algorithm used
+ * to context switch between floating point tasks. The deferred algorithm
+ * can be significantly better in a system with few floating point tasks
+ * because it reduces the total number of save and restore FP context
+ * operations. However, this algorithm can not be used on all CPUs due
+ * to unpredictable use of FP registers by some compilers for integer
+ * operations.
+ */
+
+#if ( CPU_HARDWARE_FP == TRUE )
+#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
+ if ( (heir->fp_context != NULL) && !_Thread_Is_allocated_fp( heir ) ) {
+ if ( _Thread_Allocated_fp != NULL )
+ _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
+ _Context_Restore_fp( &heir->fp_context );
+ _Thread_Allocated_fp = heir;
+ }
+#else
+ if ( executing->fp_context != NULL )
+ _Context_Save_fp( &executing->fp_context );
+
+ if ( heir->fp_context != NULL )
+ _Context_Restore_fp( &heir->fp_context );
+#endif
+#endif
+
+ _Context_Switch( &executing->Registers, &heir->Registers );
+
+ executing = _Thread_Executing;
+
+ _ISR_Disable( level );
+ }
+
+ _Thread_Dispatch_disable_level = 0;
+
+ _ISR_Enable( level );
+
+ if ( executing->do_post_task_switch_extension ) {
+ executing->do_post_task_switch_extension = FALSE;
+ _API_extensions_Run_postswitch();
+ }
+
+}
+
+/*PAGE
+ *
+ * _Thread_Stack_Allocate
+ *
+ * Allocate the requested stack space for the thread.
+ * Return the actual size allocated after any adjustment,
+ * or return zero if the allocation failed.
+ * Set the Start.stack field to the address of the stack.
+ */
+
+static unsigned32 _Thread_Stack_Allocate(
+ Thread_Control *the_thread,
+ unsigned32 stack_size)
+{
+ void *stack_addr = 0;
+
+ if ( !_Stack_Is_enough( stack_size ) )
+ stack_size = STACK_MINIMUM_SIZE;
+
+ /*
+   * Call ONLY the CPU table stack allocate hook, _or_ the
+   * RTEMS workspace allocate.  This is so the stack free
+ * routine can call the correct deallocation routine.
+ */
+
+ if ( _CPU_Table.stack_allocate_hook )
+ {
+ stack_addr = (*_CPU_Table.stack_allocate_hook)( stack_size );
+ } else {
+
+ /*
+     * First pad the requested size so we allocate enough memory for
+     * the context initialization to align it properly.  The address
+     * returned by the workspace allocate must be directly stored in the
+ * stack control block because it is later used in the free sequence.
+ *
+ * Thus it is the responsibility of the CPU dependent code to
+ * get and keep the stack adjust factor, the stack alignment, and
+ * the context initialization sequence in sync.
+ */
+
+ stack_size = _Stack_Adjust_size( stack_size );
+ stack_addr = _Workspace_Allocate( stack_size );
+ }
+
+ if ( !stack_addr )
+ stack_size = 0;
+
+ the_thread->Start.stack = stack_addr;
+
+ return stack_size;
+}
+
+/*
+ * _Thread_Stack_Free
+ *
+ * Deallocate the Thread's stack.
+ */
+
+static void _Thread_Stack_Free(void *stack_addr)
+{
+ /*
+   * Call ONLY the CPU table stack free hook, or the
+   * RTEMS workspace free.  This is so the free
+ * routine properly matches the allocation of the stack.
+ */
+
+ if ( _CPU_Table.stack_free_hook )
+ (*_CPU_Table.stack_free_hook)( stack_addr );
+ else
+ _Workspace_Free( stack_addr );
+}
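The two hooks must be installed as a pair; see the consistency check in _Thread_Handler_initialization earlier in this file.  A minimal sketch of a matching pair for the CPU table, using the C library allocator purely for illustration:

#include <stdlib.h>

void *_Hypothetical_Stack_allocate_hook( unsigned32 stack_size )
{
  return malloc( stack_size );
}

void _Hypothetical_Stack_free_hook( void *stack_addr )
{
  free( stack_addr );
}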
+
+/*PAGE
+ *
+ * _Thread_Initialize
+ *
+ * XXX
+ */
+
+boolean _Thread_Initialize(
+ Objects_Information *information,
+ Thread_Control *the_thread,
+ void *stack_area, /* NULL if to be allocated */
+ unsigned32 stack_size, /* insure it is >= min */
+ boolean is_fp, /* TRUE if thread uses FP */
+ Priority_Control priority,
+ boolean is_preemptible,
+ boolean is_timeslice,
+ unsigned32 isr_level,
+ Objects_Name name
+
+)
+{
+ unsigned32 actual_stack_size;
+ void *stack;
+ void *fp_area;
+ void *extensions_area;
+
+ /*
+ * Allocate and Initialize the stack for this thread.
+ */
+
+ if ( !_Stack_Is_enough( stack_size ) )
+ actual_stack_size = STACK_MINIMUM_SIZE;
+ else
+ actual_stack_size = stack_size;
+
+ actual_stack_size = _Stack_Adjust_size( actual_stack_size );
+ stack = stack_area;
+
+ if ( !stack ) {
+ actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
+
+ if ( !actual_stack_size )
+ return FALSE; /* stack allocation failed */
+
+ stack = the_thread->Start.stack;
+ } else
+ the_thread->Start.stack = NULL;
+
+ _Stack_Initialize(
+ &the_thread->Start.Initial_stack,
+ stack,
+ actual_stack_size
+ );
+
+ /*
+ * Allocate the floating point area for this thread
+ */
+
+ if ( is_fp ) {
+
+ fp_area = _Workspace_Allocate( CONTEXT_FP_SIZE );
+ if ( !fp_area ) {
+ if ( the_thread->Start.stack )
+ (void) _Thread_Stack_Free( the_thread->Start.stack );
+ return FALSE;
+ }
+ fp_area = _Context_Fp_start( fp_area, 0 );
+
+ } else
+ fp_area = NULL;
+
+ the_thread->fp_context = fp_area;
+ the_thread->Start.fp_context = fp_area;
+
+
+ /*
+ * Allocate the extensions area for this thread
+ */
+
+ if ( _Thread_Maximum_extensions ) {
+ extensions_area = _Workspace_Allocate(
+ (_Thread_Maximum_extensions + 1) * sizeof( void * )
+ );
+
+ if ( !extensions_area ) {
+ if ( fp_area )
+ (void) _Workspace_Free( fp_area );
+
+ if ( the_thread->Start.stack )
+ (void) _Thread_Stack_Free( the_thread->Start.stack );
+
+ return FALSE;
+ }
+ } else
+ extensions_area = NULL;
+
+ the_thread->extensions = extensions_area;
+
+ /*
+ * General initialization
+ */
+
+ the_thread->Start.is_preemptible = is_preemptible;
+ the_thread->Start.is_timeslice = is_timeslice;
+ the_thread->Start.isr_level = isr_level;
+
+ the_thread->current_state = STATES_DORMANT;
+ the_thread->resource_count = 0;
+ the_thread->real_priority = priority;
+ the_thread->Start.initial_priority = priority;
+
+ _Thread_Set_priority( the_thread, priority );
+
+ /*
+ * Open the object
+ */
+
+ _Objects_Open( information, &the_thread->Object, name );
+
+ /*
+ * Invoke create extensions
+ */
+
+ if ( !_User_extensions_Thread_create( the_thread ) ) {
+
+ if ( extensions_area )
+ (void) _Workspace_Free( extensions_area );
+
+ if ( fp_area )
+ (void) _Workspace_Free( fp_area );
+
+ if ( the_thread->Start.stack )
+ (void) _Thread_Stack_Free( the_thread->Start.stack );
+
+ return FALSE;
+ }
+
+ return TRUE;
+
+}
+
+/*
+ * _Thread_Start
+ *
+ * DESCRIPTION:
+ *
+ * XXX
+ */
+
+boolean _Thread_Start(
+ Thread_Control *the_thread,
+ Thread_Start_types the_prototype,
+ void *entry_point,
+ void *pointer_argument,
+ unsigned32 numeric_argument
+)
+{
+ if ( _States_Is_dormant( the_thread->current_state ) ) {
+
+ the_thread->Start.entry_point = entry_point;
+
+ the_thread->Start.prototype = the_prototype;
+ the_thread->Start.pointer_argument = pointer_argument;
+ the_thread->Start.numeric_argument = numeric_argument;
+
+ _Thread_Load_environment( the_thread );
+
+ _Thread_Ready( the_thread );
+
+ _User_extensions_Thread_start( the_thread );
+
+ return TRUE;
+ }
+
+ return FALSE;
+
+}
+
+/*
+ * _Thread_Restart
+ *
+ * DESCRIPTION:
+ *
+ * XXX
+ */
+
+boolean _Thread_Restart(
+ Thread_Control *the_thread,
+ void *pointer_argument,
+ unsigned32 numeric_argument
+)
+{
+ if ( !_States_Is_dormant( the_thread->current_state ) ) {
+
+ _Thread_Set_transient( the_thread );
+ the_thread->resource_count = 0;
+ the_thread->is_preemptible = the_thread->Start.is_preemptible;
+ the_thread->is_timeslice = the_thread->Start.is_timeslice;
+
+ the_thread->Start.pointer_argument = pointer_argument;
+ the_thread->Start.numeric_argument = numeric_argument;
+
+ if ( !_Thread_queue_Extract_with_proxy( the_thread ) ) {
+
+ if ( _Watchdog_Is_active( &the_thread->Timer ) )
+ (void) _Watchdog_Remove( &the_thread->Timer );
+ }
+
+ if ( the_thread->current_priority != the_thread->Start.initial_priority ) {
+ the_thread->real_priority = the_thread->Start.initial_priority;
+ _Thread_Set_priority( the_thread, the_thread->Start.initial_priority );
+ }
+
+ _Thread_Load_environment( the_thread );
+
+ _Thread_Ready( the_thread );
+
+ _User_extensions_Thread_restart( the_thread );
+
+ if ( _Thread_Is_executing ( the_thread ) )
+ _Thread_Restart_self();
+
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+/*
+ * _Thread_Close
+ *
+ * DESCRIPTION:
+ *
+ * XXX
+ */
+
+void _Thread_Close(
+ Objects_Information *information,
+ Thread_Control *the_thread
+)
+{
+ _Objects_Close( information, &the_thread->Object );
+
+ _Thread_Set_state( the_thread, STATES_TRANSIENT );
+
+ if ( !_Thread_queue_Extract_with_proxy( the_thread ) ) {
+
+ if ( _Watchdog_Is_active( &the_thread->Timer ) )
+ (void) _Watchdog_Remove( &the_thread->Timer );
+ }
+
+ _User_extensions_Thread_delete( the_thread );
+
+#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
+ if ( _Thread_Is_allocated_fp( the_thread ) )
+ _Thread_Deallocate_fp();
+#endif
+ the_thread->fp_context = NULL;
+
+ if ( the_thread->Start.fp_context )
+ (void) _Workspace_Free( the_thread->Start.fp_context );
+
+ if ( the_thread->Start.stack )
+ (void) _Thread_Stack_Free( the_thread->Start.stack );
+
+ if ( the_thread->extensions )
+ (void) _Workspace_Free( the_thread->extensions );
+
+ the_thread->Start.stack = NULL;
+ the_thread->extensions = NULL;
+}
+
+/*PAGE
+ *
+ * _Thread_Ready
+ *
+ * This kernel routine readies the requested thread; the ready chain
+ * is adjusted.  A new heir thread may be selected.
+ *
+ * Input parameters:
+ * the_thread - pointer to thread control block
+ *
+ * Output parameters: NONE
+ *
+ * NOTE: This routine uses the "blocking" heir selection mechanism.
+ * This insures the correct heir after a thread restart.
+ *
+ * INTERRUPT LATENCY:
+ * ready chain
+ * select heir
+ */
+
+void _Thread_Ready(
+ Thread_Control *the_thread
+)
+{
+ ISR_Level level;
+ Thread_Control *heir;
+
+ _ISR_Disable( level );
+
+ the_thread->current_state = STATES_READY;
+
+ _Priority_Add_to_bit_map( &the_thread->Priority_map );
+
+ _Chain_Append_unprotected( the_thread->ready, &the_thread->Object.Node );
+
+ _ISR_Flash( level );
+
+ _Thread_Calculate_heir();
+
+ heir = _Thread_Heir;
+
+ if ( !_Thread_Is_executing( heir ) && _Thread_Executing->is_preemptible )
+ _Context_Switch_necessary = TRUE;
+
+ _ISR_Enable( level );
+}
+
+/*PAGE
+ *
+ * _Thread_Clear_state
+ *
+ * This kernel routine clears the appropriate states in the
+ * requested thread. The thread ready chain is adjusted if
+ * necessary and the Heir thread is set accordingly.
+ *
+ * Input parameters:
+ * the_thread - pointer to thread control block
+ * state - state set to clear
+ *
+ * Output parameters: NONE
+ *
+ * INTERRUPT LATENCY:
+ * priority map
+ * select heir
+ */
+
+void _Thread_Clear_state(
+ Thread_Control *the_thread,
+ States_Control state
+)
+{
+ ISR_Level level;
+
+ _ISR_Disable( level );
+ the_thread->current_state =
+ _States_Clear( state, the_thread->current_state );
+
+ if ( _States_Is_ready( the_thread->current_state ) ) {
+
+ _Priority_Add_to_bit_map( &the_thread->Priority_map );
+
+ _Chain_Append_unprotected( the_thread->ready, &the_thread->Object.Node );
+
+ _ISR_Flash( level );
+
+ if ( the_thread->current_priority < _Thread_Heir->current_priority ) {
+ _Thread_Heir = the_thread;
+ if ( _Thread_Executing->is_preemptible ||
+ the_thread->current_priority == 0 )
+ _Context_Switch_necessary = TRUE;
+ }
+ }
+ _ISR_Enable( level );
+}
+
+/*PAGE
+ *
+ * _Thread_Set_state
+ *
+ * This kernel routine sets the requested state in the THREAD. The
+ * THREAD chain is adjusted if necessary.
+ *
+ * Input parameters:
+ * the_thread - pointer to thread control block
+ * state - state to be set
+ *
+ * Output parameters: NONE
+ *
+ * INTERRUPT LATENCY:
+ * ready chain
+ * select map
+ */
+
+void _Thread_Set_state(
+ Thread_Control *the_thread,
+ States_Control state
+)
+{
+ ISR_Level level;
+ Chain_Control *ready;
+
+ ready = the_thread->ready;
+ _ISR_Disable( level );
+ if ( !_States_Is_ready( the_thread->current_state ) ) {
+ the_thread->current_state =
+ _States_Set( state, the_thread->current_state );
+ _ISR_Enable( level );
+ return;
+ }
+
+ the_thread->current_state = state;
+
+ if ( _Chain_Has_only_one_node( ready ) ) {
+
+ _Chain_Initialize_empty( ready );
+ _Priority_Remove_from_bit_map( &the_thread->Priority_map );
+
+ } else
+ _Chain_Extract_unprotected( &the_thread->Object.Node );
+
+ _ISR_Flash( level );
+
+ if ( _Thread_Is_heir( the_thread ) )
+ _Thread_Calculate_heir();
+
+ if ( _Thread_Is_executing( the_thread ) )
+ _Context_Switch_necessary = TRUE;
+
+ _ISR_Enable( level );
+}
+
+/*PAGE
+ *
+ * _Thread_Set_transient
+ *
+ * This kernel routine places the requested thread in the transient state
+ * which will remove it from the ready queue, if necessary. No
+ * rescheduling is necessary because it is assumed that the transient
+ * state will be cleared before dispatching is enabled.
+ *
+ * Input parameters:
+ * the_thread - pointer to thread control block
+ *
+ * Output parameters: NONE
+ *
+ * INTERRUPT LATENCY:
+ * only case
+ */
+
+void _Thread_Set_transient(
+ Thread_Control *the_thread
+)
+{
+ ISR_Level level;
+ unsigned32 old_state;
+ Chain_Control *ready;
+
+ ready = the_thread->ready;
+ _ISR_Disable( level );
+
+ old_state = the_thread->current_state;
+ the_thread->current_state = _States_Set( STATES_TRANSIENT, old_state );
+
+ if ( _States_Is_ready( old_state ) ) {
+ if ( _Chain_Has_only_one_node( ready ) ) {
+
+ _Chain_Initialize_empty( ready );
+ _Priority_Remove_from_bit_map( &the_thread->Priority_map );
+
+ } else
+ _Chain_Extract_unprotected( &the_thread->Object.Node );
+ }
+
+ _ISR_Enable( level );
+
+}
+
+/*PAGE
+ *
+ * _Thread_Reset_timeslice
+ *
+ * This routine removes the running thread from the ready chain,
+ * places it immediately at the rear of that chain, and then resets
+ * the timeslice counter.  The heir THREAD will be updated if the
+ * running thread is also the heir.
+ *
+ * Input parameters: NONE
+ *
+ * Output parameters: NONE
+ *
+ * INTERRUPT LATENCY:
+ * ready chain
+ * select heir
+ */
+
+void _Thread_Reset_timeslice( void )
+{
+ ISR_Level level;
+ Thread_Control *executing;
+ Chain_Control *ready;
+
+ executing = _Thread_Executing;
+ ready = executing->ready;
+ _ISR_Disable( level );
+ if ( _Chain_Has_only_one_node( ready ) ) {
+ _Thread_Ticks_remaining_in_timeslice = _Thread_Ticks_per_timeslice;
+ _ISR_Enable( level );
+ return;
+ }
+ _Chain_Extract_unprotected( &executing->Object.Node );
+ _Chain_Append_unprotected( ready, &executing->Object.Node );
+
+ _ISR_Flash( level );
+
+ if ( _Thread_Is_heir( executing ) )
+ _Thread_Heir = (Thread_Control *) ready->first;
+
+ _Context_Switch_necessary = TRUE;
+
+ _ISR_Enable( level );
+}
+
+/*PAGE
+ *
+ * _Thread_Tickle_timeslice
+ *
+ * This scheduler routine determines if timeslicing is enabled
+ * for the currently executing thread and, if so, updates the
+ * timeslice count and checks for timeslice expiration.
+ *
+ * Input parameters: NONE
+ *
+ * Output parameters: NONE
+ */
+
+void _Thread_Tickle_timeslice( void )
+{
+ if ( !_Thread_Executing->is_timeslice ||
+ !_Thread_Executing->is_preemptible ||
+ !_States_Is_ready( _Thread_Executing->current_state ) )
+ return;
+
+ if ( --_Thread_Ticks_remaining_in_timeslice == 0 ) {
+ _Thread_Reset_timeslice();
+ }
+}
+
+/*PAGE
+ *
+ * _Thread_Yield_processor
+ *
+ * This kernel routine removes the running THREAD from the ready chain
+ * and places it immediately at the rear of that chain.  If the running
+ * THREAD is the only thread on its ready chain, the chain is left
+ * unchanged.  The heir THREAD will be updated if the running thread is
+ * also the heir.
+ *
+ * Input parameters: NONE
+ *
+ * Output parameters: NONE
+ *
+ * INTERRUPT LATENCY:
+ * ready chain
+ * select heir
+ */
+
+void _Thread_Yield_processor( void )
+{
+ ISR_Level level;
+ Thread_Control *executing;
+ Chain_Control *ready;
+
+ executing = _Thread_Executing;
+ ready = executing->ready;
+ _ISR_Disable( level );
+ if ( !_Chain_Has_only_one_node( ready ) ) {
+ _Chain_Extract_unprotected( &executing->Object.Node );
+ _Chain_Append_unprotected( ready, &executing->Object.Node );
+
+ _ISR_Flash( level );
+
+ if ( _Thread_Is_heir( executing ) )
+ _Thread_Heir = (Thread_Control *) ready->first;
+ _Context_Switch_necessary = TRUE;
+ }
+ else if ( !_Thread_Is_heir( executing ) )
+ _Context_Switch_necessary = TRUE;
+
+ _ISR_Enable( level );
+}
+
+/*PAGE
+ *
+ * _Thread_Load_environment
+ *
+ * Load starting environment for another thread from its start area in the
+ * thread. Only called from t_restart and t_start.
+ *
+ * Input parameters:
+ * the_thread - thread control block pointer
+ *
+ * Output parameters: NONE
+ */
+
+void _Thread_Load_environment(
+ Thread_Control *the_thread
+)
+{
+ boolean is_fp = FALSE;
+
+ if ( the_thread->Start.fp_context ) {
+ the_thread->fp_context = the_thread->Start.fp_context;
+ _Context_Initialize_fp( &the_thread->fp_context );
+ is_fp = TRUE;
+ }
+
+ the_thread->do_post_task_switch_extension = FALSE;
+ the_thread->is_preemptible = the_thread->Start.is_preemptible;
+ the_thread->is_timeslice = the_thread->Start.is_timeslice;
+
+ _Context_Initialize(
+ &the_thread->Registers,
+ the_thread->Start.Initial_stack.area,
+ the_thread->Start.Initial_stack.size,
+ the_thread->Start.isr_level,
+ _Thread_Handler,
+ is_fp
+ );
+
+}
+
+/*PAGE
+ *
+ * _Thread_Handler
+ *
+ * This routine is the "primal" entry point for all threads.
+ * _Context_Initialize() dummies up the thread's initial context
+ * to cause the first Context_Switch() to jump to _Thread_Handler().
+ *
+ * This routine is also the default thread exit error handler:  it is
+ * returned to when a thread's entry point returns, and the configured
+ * fatal error handler is then invoked to process the exit.
+ *
+ * NOTE:
+ *
+ * On entry, it is assumed all interrupts are blocked and that this
+ * routine needs to set the initial isr level. This may or may not
+ * actually be needed by the context switch routine and as a result
+ * interrupts may already be at their proper level.  Either way,
+ * setting the initial isr level properly here is safe.
+ *
+ * Currently this is only really needed for the posix port,
+ * ref: _Context_Switch in unix/cpu.c
+ *
+ * Input parameters: NONE
+ *
+ * Output parameters: NONE
+ */
+
+void _Thread_Handler( void )
+{
+ ISR_Level level;
+ Thread_Control *executing;
+
+ executing = _Thread_Executing;
+
+ /*
+   * We have to put level into a register for those CPUs that use
+ * inline asm here
+ */
+
+ level = executing->Start.isr_level;
+ _ISR_Set_level(level);
+
+ /*
+ * Take care that 'begin' extensions get to complete before
+ * 'switch' extensions can run. This means must keep dispatch
+ * disabled until all 'begin' extensions complete.
+ */
+
+ _User_extensions_Thread_begin( executing );
+
+ /*
+ * At this point, the dispatch disable level BETTER be 1.
+ */
+
+ _Thread_Enable_dispatch();
+
+ switch ( executing->Start.prototype ) {
+ case THREAD_START_NUMERIC:
+ (*executing->Start.entry_point)( executing->Start.numeric_argument );
+ break;
+ case THREAD_START_POINTER:
+ (*executing->Start.entry_point)( executing->Start.pointer_argument );
+ break;
+ case THREAD_START_BOTH_POINTER_FIRST:
+ (*executing->Start.entry_point)(
+ executing->Start.pointer_argument,
+ executing->Start.numeric_argument
+ );
+ break;
+ case THREAD_START_BOTH_NUMERIC_FIRST:
+ (*executing->Start.entry_point)(
+ executing->Start.numeric_argument,
+ executing->Start.pointer_argument
+ );
+ break;
+ }
+
+ _User_extensions_Thread_exitted( executing );
+
+ _Internal_error_Occurred(
+ INTERNAL_ERROR_CORE,
+ TRUE,
+ INTERNAL_ERROR_THREAD_EXITTED
+ );
+}
+
+/*PAGE
+ *
+ * _Thread_Delay_ended
+ *
+ * This routine processes a thread whose delay period has ended.
+ * It is called by the watchdog handler.
+ *
+ * Input parameters:
+ * id - thread id
+ *
+ * Output parameters: NONE
+ */
+
+void _Thread_Delay_ended(
+ Objects_Id id,
+ void *ignored
+)
+{
+ Thread_Control *the_thread;
+ Objects_Locations location;
+
+ the_thread = _Thread_Get( id, &location );
+ switch ( location ) {
+ case OBJECTS_ERROR:
+ case OBJECTS_REMOTE: /* impossible */
+ break;
+ case OBJECTS_LOCAL:
+ _Thread_Unblock( the_thread );
+ _Thread_Unnest_dispatch();
+ break;
+ }
+}
+
+/*PAGE
+ *
+ * _Thread_Change_priority
+ *
+ * This kernel routine changes the priority of the thread. The
+ * thread chain is adjusted if necessary.
+ *
+ * Input parameters:
+ * the_thread - pointer to thread control block
+ * new_priority - ultimate priority
+ *
+ * Output parameters: NONE
+ *
+ * INTERRUPT LATENCY:
+ * ready chain
+ * select heir
+ */
+
+void _Thread_Change_priority(
+ Thread_Control *the_thread,
+ Priority_Control new_priority
+)
+{
+ ISR_Level level;
+
+ _Thread_Set_transient( the_thread );
+
+ if ( the_thread->current_priority != new_priority )
+ _Thread_Set_priority( the_thread, new_priority );
+
+ _ISR_Disable( level );
+
+ the_thread->current_state =
+ _States_Clear( STATES_TRANSIENT, the_thread->current_state );
+
+ if ( ! _States_Is_ready( the_thread->current_state ) ) {
+ _ISR_Enable( level );
+ return;
+ }
+
+ _Priority_Add_to_bit_map( &the_thread->Priority_map );
+ _Chain_Append_unprotected( the_thread->ready, &the_thread->Object.Node );
+
+ _ISR_Flash( level );
+
+ _Thread_Calculate_heir();
+
+ if ( !_Thread_Is_executing_also_the_heir() &&
+ _Thread_Executing->is_preemptible )
+ _Context_Switch_necessary = TRUE;
+
+ _ISR_Enable( level );
+}
+
+/*PAGE
+ *
+ * _Thread_Set_priority
+ *
+ * This kernel routine sets the priority of the thread and updates
+ * the thread's ready chain pointer and priority map information.
+ *
+ * Input parameters:
+ *   the_thread - pointer to thread control block
+ * new_priority - new priority
+ *
+ * Output: NONE
+ */
+
+void _Thread_Set_priority(
+ Thread_Control *the_thread,
+ Priority_Control new_priority
+)
+{
+ the_thread->current_priority = new_priority;
+ the_thread->ready = &_Thread_Ready_chain[ new_priority ];
+
+ _Priority_Initialize_information( &the_thread->Priority_map, new_priority );
+}
+
+/*PAGE
+ *
+ * _Thread_Evaluate_mode
+ *
+ * XXX
+ */
+
+boolean _Thread_Evaluate_mode( void )
+{
+ Thread_Control *executing;
+
+ executing = _Thread_Executing;
+
+ if ( !_States_Is_ready( executing->current_state ) ||
+ ( !_Thread_Is_heir( executing ) && executing->is_preemptible ) ) {
+ _Context_Switch_necessary = TRUE;
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+/*PAGE
+ *
+ * _Thread_Get
+ *
+ * NOTE: If we are not using static inlines, this must be a real
+ * subroutine call.
+ *
+ * NOTE: XXX... This routine may be able to be optimized.
+ */
+
+#ifndef USE_INLINES
+
+Thread_Control *_Thread_Get (
+ Objects_Id id,
+ Objects_Locations *location
+)
+{
+ Objects_Classes the_class;
+ Objects_Information *information;
+
+ if ( _Objects_Are_ids_equal( id, OBJECTS_ID_OF_SELF ) ) {
+ _Thread_Disable_dispatch();
+ *location = OBJECTS_LOCAL;
+ return( _Thread_Executing );
+ }
+
+ the_class = _Objects_Get_class( id );
+
+ if ( the_class > OBJECTS_CLASSES_LAST ) {
+ *location = OBJECTS_ERROR;
+ return (Thread_Control *) 0;
+ }
+
+ information = _Objects_Information_table[ the_class ];
+
+ if ( !information || !information->is_thread ) {
+ *location = OBJECTS_ERROR;
+ return (Thread_Control *) 0;
+ }
+
+ return (Thread_Control *) _Objects_Get( information, id, location );
+}
+
+#endif
+
+/*PAGE
+ *
+ * _Thread_Idle_body
+ *
+ * This kernel routine is the idle thread. The idle thread runs any time
+ * no other thread is ready to run. This thread loops forever with
+ * interrupts enabled.
+ *
+ * Input parameters:
+ * ignored - this parameter is ignored
+ *
+ * Output parameters: NONE
+ */
+
+#if (CPU_PROVIDES_IDLE_THREAD_BODY == FALSE)
+Thread _Thread_Idle_body(
+ unsigned32 ignored
+)
+{
+ for( ; ; ) ;
+}
+#endif
diff --git a/c/src/exec/score/src/threadmp.c b/c/src/exec/score/src/threadmp.c
new file mode 100644
index 0000000000..31f7e5ff62
--- /dev/null
+++ b/c/src/exec/score/src/threadmp.c
@@ -0,0 +1,164 @@
+/*
+ * Multiprocessing Support for the Thread Handler
+ *
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/priority.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/wkspace.h>
+#include <rtems/score/isr.h>
+
+/*PAGE
+ *
+ * _Thread_MP_Handler_initialization
+ *
+ */
+
+void _Thread_MP_Handler_initialization (
+ unsigned32 maximum_proxies
+)
+{
+
+ _Chain_Initialize_empty( &_Thread_MP_Active_proxies );
+
+ if ( maximum_proxies == 0 ) {
+ _Chain_Initialize_empty( &_Thread_MP_Inactive_proxies );
+ return;
+ }
+
+
+ _Chain_Initialize(
+ &_Thread_MP_Inactive_proxies,
+ _Workspace_Allocate_or_fatal_error(
+ maximum_proxies * sizeof( Thread_Proxy_control )
+ ),
+ maximum_proxies,
+ sizeof( Thread_Proxy_control )
+ );
+
+}
+
+/*PAGE
+ *
+ * _Thread_MP_Allocate_proxy
+ *
+ */
+
+Thread_Control *_Thread_MP_Allocate_proxy (
+ States_Control the_state
+)
+{
+ Thread_Control *the_thread;
+ Thread_Proxy_control *the_proxy;
+
+ the_thread = (Thread_Control *)_Chain_Get( &_Thread_MP_Inactive_proxies );
+
+ if ( !_Thread_Is_null( the_thread ) ) {
+
+ the_proxy = (Thread_Proxy_control *) the_thread;
+
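+    /*
+     *  The proxy stands in for the remote thread which sent the packet
+     *  currently being processed, so it takes on that thread's id and
+     *  priority and inherits the executing thread's wait information.
+     */
+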
+ _Thread_Executing->Wait.return_code = THREAD_STATUS_PROXY_BLOCKING;
+
+ the_proxy->receive_packet = _Thread_MP_Receive->receive_packet;
+
+ the_proxy->Object.id = _Thread_MP_Receive->receive_packet->source_tid;
+
+ the_proxy->current_priority =
+ _Thread_MP_Receive->receive_packet->source_priority;
+
+ the_proxy->current_state = _States_Set( STATES_DORMANT, the_state );
+
+ the_proxy->Wait = _Thread_Executing->Wait;
+
+ _Chain_Append( &_Thread_MP_Active_proxies, &the_proxy->Active );
+
+ return the_thread;
+ }
+
+ _Internal_error_Occurred(
+ INTERNAL_ERROR_CORE,
+ TRUE,
+ INTERNAL_ERROR_OUT_OF_PROXIES
+ );
+
+ /*
+   *  NOTE: The following return ensures that the compiler will
+   *        think that all paths return a value.
+ */
+
+ return NULL;
+}
+
+/*PAGE
+ *
+ * _Thread_MP_Find_proxy
+ *
+ */
+
+/*
+ * The following macro provides the offset of the Active element
+ * in the Thread_Proxy_control structure. This is the logical
+ * equivalent of the POSITION attribute in Ada.
+ */
+
+#define _Thread_MP_Proxy_Active_offset \
+ ((unsigned32)&(((Thread_Proxy_control *)0))->Active)
+
+Thread_Control *_Thread_MP_Find_proxy (
+ Objects_Id the_id
+)
+{
+
+ Chain_Node *proxy_node;
+ Thread_Control *the_thread;
+ ISR_Level level;
+
+restart:
+
+ _ISR_Disable( level );
+
+ for ( proxy_node = _Thread_MP_Active_proxies.first;
+ !_Chain_Is_tail( &_Thread_MP_Active_proxies, proxy_node ) ;
+ ) {
+
+ the_thread = _Addresses_Subtract_offset(
+ proxy_node,
+ _Thread_MP_Proxy_Active_offset
+ );
+
+ if ( _Objects_Are_ids_equal( the_thread->Object.id, the_id ) ) {
+ _ISR_Enable( level );
+ return the_thread;
+ }
+
+ _ISR_Flash( level );
+
+ proxy_node = proxy_node->next;
+
+ /*
+ * A proxy which is only dormant is not in a blocking state.
+ * Therefore, we are looking at proxy which has been moved from
+ * active to inactive chain (by an ISR) and need to restart
+ * the search.
+ */
+
+ if ( _States_Is_only_dormant( the_thread->current_state ) ) {
+ _ISR_Enable( level );
+ goto restart;
+ }
+ }
+
+ _ISR_Enable( level );
+ return NULL;
+}
diff --git a/c/src/exec/score/src/threadq.c b/c/src/exec/score/src/threadq.c
new file mode 100644
index 0000000000..c02de10ee0
--- /dev/null
+++ b/c/src/exec/score/src/threadq.c
@@ -0,0 +1,967 @@
+/*
+ * Thread Queue Handler
+ *
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/chain.h>
+#include <rtems/score/isr.h>
+#include <rtems/score/object.h>
+#include <rtems/score/states.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/threadq.h>
+#include <rtems/score/tqdata.h>
+
+/*PAGE
+ *
+ * _Thread_queue_Initialize
+ *
+ * This routine initializes the specified threadq.
+ *
+ * Input parameters:
+ * the_thread_queue - pointer to a threadq header
+ * the_class - class of the object to which this belongs
+ * discipline - queueing discipline
+ * state - state of waiting threads
+ * proxy_extract_callout - MP specific callout
+ * timeout_status - return on a timeout
+ *
+ * Output parameters: NONE
+ */
+
+void _Thread_queue_Initialize(
+ Thread_queue_Control *the_thread_queue,
+ Objects_Classes the_class,
+ Thread_queue_Disciplines the_discipline,
+ States_Control state,
+ Thread_queue_Extract_callout proxy_extract_callout,
+ unsigned32 timeout_status
+)
+{
+ unsigned32 index;
+
+ _Thread_queue_Extract_table[ the_class ] = proxy_extract_callout;
+
+ the_thread_queue->state = state;
+ the_thread_queue->discipline = the_discipline;
+ the_thread_queue->timeout_status = timeout_status;
+ the_thread_queue->sync_state = THREAD_QUEUE_SYNCHRONIZED;
+
+ switch ( the_discipline ) {
+ case THREAD_QUEUE_DISCIPLINE_FIFO:
+ _Chain_Initialize_empty( &the_thread_queue->Queues.Fifo );
+ break;
+ case THREAD_QUEUE_DISCIPLINE_PRIORITY:
+ for( index=0 ;
+ index < TASK_QUEUE_DATA_NUMBER_OF_PRIORITY_HEADERS ;
+ index++)
+ _Chain_Initialize_empty( &the_thread_queue->Queues.Priority[index] );
+ break;
+ }
+
+}
+
+/*PAGE
+ *
+ * _Thread_queue_Enqueue
+ *
+ *  This routine blocks a thread, places it on a thread queue, and optionally
+ * starts a timeout timer.
+ *
+ * Input parameters:
+ * the_thread_queue - pointer to threadq
+ * timeout - interval to wait
+ *
+ * Output parameters: NONE
+ *
+ * INTERRUPT LATENCY:
+ * only case
+ */
+
+void _Thread_queue_Enqueue(
+ Thread_queue_Control *the_thread_queue,
+ Watchdog_Interval timeout
+)
+{
+ Thread_Control *the_thread;
+
+ the_thread = _Thread_Executing;
+
+ if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet )
+ the_thread = _Thread_MP_Allocate_proxy( the_thread_queue->state );
+ else
+ _Thread_Set_state( the_thread, the_thread_queue->state );
+
+ if ( timeout ) {
+ _Watchdog_Initialize(
+ &the_thread->Timer,
+ _Thread_queue_Timeout,
+ the_thread->Object.id,
+ NULL
+ );
+
+ _Watchdog_Insert_ticks( &the_thread->Timer, timeout );
+ }
+
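+  /*
+   *  Now that the thread (or its proxy) is blocked, place it on the
+   *  queue using the discipline configured for this thread queue.
+   */
+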
+ switch( the_thread_queue->discipline ) {
+ case THREAD_QUEUE_DISCIPLINE_FIFO:
+ _Thread_queue_Enqueue_fifo( the_thread_queue, the_thread, timeout );
+ break;
+ case THREAD_QUEUE_DISCIPLINE_PRIORITY:
+ _Thread_queue_Enqueue_priority( the_thread_queue, the_thread, timeout );
+ break;
+ }
+}
+
+/*PAGE
+ *
+ * _Thread_queue_Dequeue
+ *
+ *  This routine removes a thread from the specified threadq, unblocks
+ *  it, and cancels its timeout timer.  The actual work is performed by
+ *  the routine for the queue's discipline (FIFO or priority).
+ *
+ * Input parameters:
+ * the_thread_queue - pointer to threadq
+ *
+ * Output parameters:
+ * returns - thread dequeued or NULL
+ *
+ * INTERRUPT LATENCY:
+ * check sync
+ */
+
+Thread_Control *_Thread_queue_Dequeue(
+ Thread_queue_Control *the_thread_queue
+)
+{
+ Thread_Control *the_thread;
+
+ switch ( the_thread_queue->discipline ) {
+ case THREAD_QUEUE_DISCIPLINE_FIFO:
+ the_thread = _Thread_queue_Dequeue_fifo( the_thread_queue );
+ break;
+ case THREAD_QUEUE_DISCIPLINE_PRIORITY:
+ the_thread = _Thread_queue_Dequeue_priority( the_thread_queue );
+ break;
+ default: /* this is only to prevent warnings */
+ the_thread = NULL;
+ break;
+ }
+
+ return( the_thread );
+}
+
+/*PAGE
+ *
+ * _Thread_queue_Extract_with_proxy
+ *
+ * This routine extracts the_thread from the_thread_queue
+ *  and ensures that if there is a proxy for this task on
+ * another node, it is also dealt with.
+ *
+ * XXX
+ */
+
+boolean _Thread_queue_Extract_with_proxy(
+ Thread_Control *the_thread
+)
+{
+ States_Control state;
+ Objects_Classes the_class;
+ Thread_queue_Extract_callout proxy_extract_callout;
+
+ state = the_thread->current_state;
+
+ if ( _States_Is_waiting_on_thread_queue( state ) ) {
+ if ( _States_Is_waiting_for_rpc_reply( state ) &&
+ _States_Is_locally_blocked( state ) ) {
+
+ the_class = _Objects_Get_class( the_thread->Wait.id );
+
+ proxy_extract_callout = _Thread_queue_Extract_table[ the_class ];
+
+ if ( proxy_extract_callout )
+ (*proxy_extract_callout)( the_thread );
+ }
+ _Thread_queue_Extract( the_thread->Wait.queue, the_thread );
+
+ return TRUE;
+ }
+ return FALSE;
+}
+
+/*PAGE
+ *
+ * _Thread_queue_Extract
+ *
+ * This routine removes a specific thread from the specified threadq,
+ * deletes any timeout, and unblocks the thread.
+ *
+ * Input parameters:
+ * the_thread_queue - pointer to a threadq header
+ * the_thread - pointer to a thread control block
+ *
+ * Output parameters: NONE
+ *
+ * INTERRUPT LATENCY: NONE
+ */
+
+void _Thread_queue_Extract(
+ Thread_queue_Control *the_thread_queue,
+ Thread_Control *the_thread
+)
+{
+ switch ( the_thread_queue->discipline ) {
+ case THREAD_QUEUE_DISCIPLINE_FIFO:
+ _Thread_queue_Extract_fifo( the_thread_queue, the_thread );
+ break;
+ case THREAD_QUEUE_DISCIPLINE_PRIORITY:
+ _Thread_queue_Extract_priority( the_thread_queue, the_thread );
+ break;
+ }
+}
+
+/*PAGE
+ *
+ * _Thread_queue_Flush
+ *
+ * This kernel routine flushes the given thread queue.
+ *
+ * Input parameters:
+ * the_thread_queue - pointer to threadq to be flushed
+ * remote_extract_callout - pointer to routine which extracts a remote thread
+ * status - status to return to the thread
+ *
+ * Output parameters: NONE
+ */
+
+void _Thread_queue_Flush(
+ Thread_queue_Control *the_thread_queue,
+ Thread_queue_Flush_callout remote_extract_callout,
+ unsigned32 status
+)
+{
+ Thread_Control *the_thread;
+
+ while ( (the_thread = _Thread_queue_Dequeue( the_thread_queue )) ) {
+ if ( _Objects_Is_local_id( the_thread->Object.id ) )
+ the_thread->Wait.return_code = status;
+ else
+ ( *remote_extract_callout )( the_thread );
+ }
+}
+
+/*PAGE
+ *
+ * _Thread_queue_First
+ *
+ *  This routine returns a pointer to the first thread on the
+ * specified threadq.
+ *
+ * Input parameters:
+ * the_thread_queue - pointer to thread queue
+ *
+ * Output parameters:
+ * returns - first thread or NULL
+ */
+
+Thread_Control *_Thread_queue_First(
+ Thread_queue_Control *the_thread_queue
+)
+{
+ Thread_Control *the_thread;
+
+ switch ( the_thread_queue->discipline ) {
+ case THREAD_QUEUE_DISCIPLINE_FIFO:
+ the_thread = _Thread_queue_First_fifo( the_thread_queue );
+ break;
+ case THREAD_QUEUE_DISCIPLINE_PRIORITY:
+ the_thread = _Thread_queue_First_priority( the_thread_queue );
+ break;
+ default: /* this is only to prevent warnings */
+ the_thread = NULL;
+ break;
+ }
+
+ return the_thread;
+}
+
+/*PAGE
+ *
+ * _Thread_queue_Timeout
+ *
+ *  This routine processes a thread which times out while waiting on
+ * a thread queue. It is called by the watchdog handler.
+ *
+ * Input parameters:
+ * id - thread id
+ *
+ * Output parameters: NONE
+ */
+
+void _Thread_queue_Timeout(
+ Objects_Id id,
+ void *ignored
+)
+{
+ Thread_Control *the_thread;
+ Thread_queue_Control *the_thread_queue;
+ Objects_Locations location;
+
+ the_thread = _Thread_Get( id, &location );
+ switch ( location ) {
+ case OBJECTS_ERROR:
+ case OBJECTS_REMOTE: /* impossible */
+ break;
+ case OBJECTS_LOCAL:
+ the_thread_queue = the_thread->Wait.queue;
+
+ /*
+ * If the_thread_queue is not synchronized, then it is either
+ * "nothing happened", "timeout", or "satisfied". If the_thread
+ * is the executing thread, then it is in the process of blocking
+ * and it is the thread which is responsible for the synchronization
+ * process.
+ *
+ * If it is not satisfied, then it is "nothing happened" and
+ * this is the "timeout" transition. After a request is satisfied,
+ * a timeout is not allowed to occur.
+ */
+
+ if ( the_thread_queue->sync_state != THREAD_QUEUE_SYNCHRONIZED &&
+ _Thread_Is_executing( the_thread ) ) {
+ if ( the_thread_queue->sync_state != THREAD_QUEUE_SATISFIED )
+ the_thread_queue->sync_state = THREAD_QUEUE_TIMEOUT;
+ } else {
+ the_thread->Wait.return_code = the_thread->Wait.queue->timeout_status;
+ _Thread_queue_Extract( the_thread->Wait.queue, the_thread );
+ }
+ _Thread_Unnest_dispatch();
+ break;
+ }
+}
+
+/*PAGE
+ *
+ * _Thread_queue_Enqueue_fifo
+ *
+ *  This routine blocks a thread, places it on a thread queue, and optionally
+ * starts a timeout timer.
+ *
+ * Input parameters:
+ * the_thread_queue - pointer to threadq
+ * the_thread - pointer to the thread to block
+ * timeout - interval to wait
+ *
+ * Output parameters: NONE
+ *
+ * INTERRUPT LATENCY:
+ * only case
+ */
+
+void _Thread_queue_Enqueue_fifo (
+ Thread_queue_Control *the_thread_queue,
+ Thread_Control *the_thread,
+ Watchdog_Interval timeout
+)
+{
+ ISR_Level level;
+ Thread_queue_States sync_state;
+
+ _ISR_Disable( level );
+
+ sync_state = the_thread_queue->sync_state;
+ the_thread_queue->sync_state = THREAD_QUEUE_SYNCHRONIZED;
+
+ switch ( sync_state ) {
+ case THREAD_QUEUE_SYNCHRONIZED:
+ /*
+ * This should never happen. It indicates that someone did not
+ * enter a thread queue critical section.
+ */
+ break;
+
+ case THREAD_QUEUE_NOTHING_HAPPENED:
+ _Chain_Append_unprotected(
+ &the_thread_queue->Queues.Fifo,
+ &the_thread->Object.Node
+ );
+ _ISR_Enable( level );
+ return;
+
+ case THREAD_QUEUE_TIMEOUT:
+ the_thread->Wait.return_code = the_thread->Wait.queue->timeout_status;
+ _ISR_Enable( level );
+ break;
+
+ case THREAD_QUEUE_SATISFIED:
+ if ( _Watchdog_Is_active( &the_thread->Timer ) ) {
+ _Watchdog_Deactivate( &the_thread->Timer );
+ _ISR_Enable( level );
+ (void) _Watchdog_Remove( &the_thread->Timer );
+ } else
+ _ISR_Enable( level );
+ break;
+ }
+
+ /*
+   *  Global objects with thread queues should not be operated on from an
+ * ISR. But the sync code still must allow short timeouts to be processed
+ * correctly.
+ */
+
+ _Thread_Unblock( the_thread );
+
+ if ( !_Objects_Is_local_id( the_thread->Object.id ) )
+ _Thread_MP_Free_proxy( the_thread );
+
+}
+
+/*PAGE
+ *
+ * _Thread_queue_Dequeue_fifo
+ *
+ * This routine removes a thread from the specified threadq.
+ *
+ * Input parameters:
+ * the_thread_queue - pointer to threadq
+ *
+ * Output parameters:
+ * returns - thread dequeued or NULL
+ *
+ * INTERRUPT LATENCY:
+ * check sync
+ * FIFO
+ */
+
+Thread_Control *_Thread_queue_Dequeue_fifo(
+ Thread_queue_Control *the_thread_queue
+)
+{
+ ISR_Level level;
+ Thread_Control *the_thread;
+
+ _ISR_Disable( level );
+ if ( !_Chain_Is_empty( &the_thread_queue->Queues.Fifo ) ) {
+
+ the_thread = (Thread_Control *)
+ _Chain_Get_first_unprotected( &the_thread_queue->Queues.Fifo );
+
+ if ( !_Watchdog_Is_active( &the_thread->Timer ) ) {
+ _ISR_Enable( level );
+ _Thread_Unblock( the_thread );
+ } else {
+ _Watchdog_Deactivate( &the_thread->Timer );
+ _ISR_Enable( level );
+ (void) _Watchdog_Remove( &the_thread->Timer );
+ _Thread_Unblock( the_thread );
+ }
+
+ if ( !_Objects_Is_local_id( the_thread->Object.id ) )
+ _Thread_MP_Free_proxy( the_thread );
+
+ return the_thread;
+ }
+
+ switch ( the_thread_queue->sync_state ) {
+ case THREAD_QUEUE_SYNCHRONIZED:
+ case THREAD_QUEUE_SATISFIED:
+ _ISR_Enable( level );
+ return NULL;
+
+ case THREAD_QUEUE_NOTHING_HAPPENED:
+ case THREAD_QUEUE_TIMEOUT:
+ the_thread_queue->sync_state = THREAD_QUEUE_SATISFIED;
+ _ISR_Enable( level );
+ return _Thread_Executing;
+ }
+ return NULL; /* this is only to prevent warnings */
+}
+
+/*PAGE
+ *
+ * _Thread_queue_Extract_fifo
+ *
+ * This routine removes a specific thread from the specified threadq,
+ * deletes any timeout, and unblocks the thread.
+ *
+ * Input parameters:
+ * the_thread_queue - pointer to a threadq header
+ * the_thread - pointer to the thread to block
+ *
+ * Output parameters: NONE
+ *
+ * INTERRUPT LATENCY:
+ * EXTRACT_FIFO
+ */
+
+void _Thread_queue_Extract_fifo(
+ Thread_queue_Control *the_thread_queue,
+ Thread_Control *the_thread
+)
+{
+ ISR_Level level;
+
+ _ISR_Disable( level );
+
+ if ( !_States_Is_waiting_on_thread_queue( the_thread->current_state ) ) {
+ _ISR_Enable( level );
+ return;
+ }
+
+ _Chain_Extract_unprotected( &the_thread->Object.Node );
+
+ if ( !_Watchdog_Is_active( &the_thread->Timer ) ) {
+ _ISR_Enable( level );
+ } else {
+ _Watchdog_Deactivate( &the_thread->Timer );
+ _ISR_Enable( level );
+ (void) _Watchdog_Remove( &the_thread->Timer );
+ }
+
+ _Thread_Unblock( the_thread );
+
+ if ( !_Objects_Is_local_id( the_thread->Object.id ) )
+ _Thread_MP_Free_proxy( the_thread );
+
+}
+
+/*PAGE
+ *
+ * _Thread_queue_First_fifo
+ *
+ *  This routine returns a pointer to the first thread on the
+ * specified threadq.
+ *
+ * Input parameters:
+ * the_thread_queue - pointer to threadq
+ *
+ * Output parameters:
+ * returns - first thread or NULL
+ */
+
+Thread_Control *_Thread_queue_First_fifo(
+ Thread_queue_Control *the_thread_queue
+)
+{
+ if ( !_Chain_Is_empty( &the_thread_queue->Queues.Fifo ) )
+ return (Thread_Control *) the_thread_queue->Queues.Fifo.first;
+
+ return NULL;
+}
+
+/*PAGE
+ *
+ * _Thread_queue_Enqueue_priority
+ *
+ *  This routine blocks a thread, places it on a thread queue, and optionally
+ * starts a timeout timer.
+ *
+ * Input parameters:
+ * the_thread_queue - pointer to threadq
+ * thread - thread to insert
+ * timeout - timeout interval in ticks
+ *
+ * Output parameters: NONE
+ *
+ * INTERRUPT LATENCY:
+ * forward less than
+ * forward equal
+ */
+
+void _Thread_queue_Enqueue_priority(
+ Thread_queue_Control *the_thread_queue,
+ Thread_Control *the_thread,
+ Watchdog_Interval timeout
+)
+{
+ Priority_Control search_priority;
+ Thread_Control *search_thread;
+ ISR_Level level;
+ Chain_Control *header;
+ unsigned32 header_index;
+ Chain_Node *the_node;
+ Chain_Node *next_node;
+ Chain_Node *previous_node;
+ Chain_Node *search_node;
+ Priority_Control priority;
+ States_Control block_state;
+ Thread_queue_States sync_state;
+
+ _Chain_Initialize_empty( &the_thread->Wait.Block2n );
+
+ priority = the_thread->current_priority;
+ header_index = _Thread_queue_Header_number( priority );
+ header = &the_thread_queue->Queues.Priority[ header_index ];
+ block_state = the_thread_queue->state;
+
+ if ( _Thread_queue_Is_reverse_search( priority ) )
+ goto restart_reverse_search;
+
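+  /*
+   *  Forward search: scan from the front of the chain and insert the
+   *  thread ahead of the first thread whose priority value is greater
+   *  than or equal to the new thread's.  An equal priority match is
+   *  appended to that thread's priority group instead.
+   */
+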
+restart_forward_search:
+ search_priority = PRIORITY_MINIMUM - 1;
+ _ISR_Disable( level );
+ search_thread = (Thread_Control *) header->first;
+ while ( !_Chain_Is_tail( header, (Chain_Node *)search_thread ) ) {
+ search_priority = search_thread->current_priority;
+ if ( priority <= search_priority )
+ break;
+
+#if ( CPU_UNROLL_ENQUEUE_PRIORITY == TRUE )
+ search_thread = (Thread_Control *) search_thread->Object.Node.next;
+ if ( _Chain_Is_tail( header, (Chain_Node *)search_thread ) )
+ break;
+ search_priority = search_thread->current_priority;
+ if ( priority <= search_priority )
+ break;
+#endif
+ _ISR_Flash( level );
+ if ( !_States_Are_set( search_thread->current_state, block_state) ) {
+ _ISR_Enable( level );
+ goto restart_forward_search;
+ }
+ search_thread =
+ (Thread_Control *)search_thread->Object.Node.next;
+ }
+
+ if ( the_thread_queue->sync_state != THREAD_QUEUE_NOTHING_HAPPENED )
+ goto synchronize;
+
+ the_thread_queue->sync_state = THREAD_QUEUE_SYNCHRONIZED;
+
+ if ( priority == search_priority )
+ goto equal_priority;
+
+ search_node = (Chain_Node *) search_thread;
+ previous_node = search_node->previous;
+ the_node = (Chain_Node *) the_thread;
+
+ the_node->next = search_node;
+ the_node->previous = previous_node;
+ previous_node->next = the_node;
+ search_node->previous = the_node;
+ _ISR_Enable( level );
+ return;
+
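+  /*
+   *  Reverse search: scan from the back of the chain and insert the
+   *  thread behind the first thread whose priority value is less than
+   *  or equal to the new thread's.  An equal priority match is appended
+   *  to that thread's priority group instead.
+   */
+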
+restart_reverse_search:
+ search_priority = PRIORITY_MAXIMUM + 1;
+
+ _ISR_Disable( level );
+ search_thread = (Thread_Control *) header->last;
+ while ( !_Chain_Is_head( header, (Chain_Node *)search_thread ) ) {
+ search_priority = search_thread->current_priority;
+ if ( priority >= search_priority )
+ break;
+#if ( CPU_UNROLL_ENQUEUE_PRIORITY == TRUE )
+ search_thread = (Thread_Control *) search_thread->Object.Node.previous;
+ if ( _Chain_Is_head( header, (Chain_Node *)search_thread ) )
+ break;
+ search_priority = search_thread->current_priority;
+ if ( priority >= search_priority )
+ break;
+#endif
+ _ISR_Flash( level );
+ if ( !_States_Are_set( search_thread->current_state, block_state) ) {
+ _ISR_Enable( level );
+ goto restart_reverse_search;
+ }
+ search_thread = (Thread_Control *)
+ search_thread->Object.Node.previous;
+ }
+
+ if ( the_thread_queue->sync_state != THREAD_QUEUE_NOTHING_HAPPENED )
+ goto synchronize;
+
+ the_thread_queue->sync_state = THREAD_QUEUE_SYNCHRONIZED;
+
+ if ( priority == search_priority )
+ goto equal_priority;
+
+ search_node = (Chain_Node *) search_thread;
+ next_node = search_node->next;
+ the_node = (Chain_Node *) the_thread;
+
+ the_node->next = next_node;
+ the_node->previous = search_node;
+ search_node->next = the_node;
+ next_node->previous = the_node;
+ _ISR_Enable( level );
+ return;
+
+equal_priority: /* add at end of priority group */
+ search_node = _Chain_Tail( &search_thread->Wait.Block2n );
+ previous_node = search_node->previous;
+ the_node = (Chain_Node *) the_thread;
+
+ the_node->next = search_node;
+ the_node->previous = previous_node;
+ previous_node->next = the_node;
+ search_node->previous = the_node;
+ _ISR_Enable( level );
+ return;
+
+synchronize:
+
+ sync_state = the_thread_queue->sync_state;
+ the_thread_queue->sync_state = THREAD_QUEUE_SYNCHRONIZED;
+
+ switch ( sync_state ) {
+ case THREAD_QUEUE_SYNCHRONIZED:
+ /*
+ * This should never happen. It indicates that someone did not
+ * enter a thread queue critical section.
+ */
+ break;
+
+ case THREAD_QUEUE_NOTHING_HAPPENED:
+ /*
+ * This should never happen. All of this was dealt with above.
+ */
+ break;
+
+ case THREAD_QUEUE_TIMEOUT:
+ the_thread->Wait.return_code = the_thread->Wait.queue->timeout_status;
+ _ISR_Enable( level );
+ break;
+
+ case THREAD_QUEUE_SATISFIED:
+ if ( _Watchdog_Is_active( &the_thread->Timer ) ) {
+ _Watchdog_Deactivate( &the_thread->Timer );
+ _ISR_Enable( level );
+ (void) _Watchdog_Remove( &the_thread->Timer );
+ } else
+ _ISR_Enable( level );
+ break;
+ }
+
+ /*
+   *  Global objects with thread queues should not be operated on from an
+ * ISR. But the sync code still must allow short timeouts to be processed
+ * correctly.
+ */
+
+ _Thread_Unblock( the_thread );
+
+ if ( !_Objects_Is_local_id( the_thread->Object.id ) )
+ _Thread_MP_Free_proxy( the_thread );
+}
+
+/*PAGE
+ *
+ * _Thread_queue_Dequeue_priority
+ *
+ * This routine removes a thread from the specified PRIORITY based
+ * threadq, unblocks it, and cancels its timeout timer.
+ *
+ * Input parameters:
+ * the_thread_queue - pointer to thread queue
+ *
+ * Output parameters:
+ * returns - thread dequeued or NULL
+ *
+ * INTERRUPT LATENCY:
+ * only case
+ */
+
+Thread_Control *_Thread_queue_Dequeue_priority(
+ Thread_queue_Control *the_thread_queue
+)
+{
+ unsigned32 index;
+ ISR_Level level;
+ Thread_Control *the_thread = NULL; /* just to remove warnings */
+ Thread_Control *new_first_thread;
+ Chain_Node *new_first_node;
+ Chain_Node *new_second_node;
+ Chain_Node *last_node;
+ Chain_Node *next_node;
+ Chain_Node *previous_node;
+
+ _ISR_Disable( level );
+ for( index=0 ;
+ index < TASK_QUEUE_DATA_NUMBER_OF_PRIORITY_HEADERS ;
+ index++ ) {
+ if ( !_Chain_Is_empty( &the_thread_queue->Queues.Priority[ index ] ) ) {
+ the_thread = (Thread_Control *)
+ the_thread_queue->Queues.Priority[ index ].first;
+ goto dequeue;
+ }
+ }
+
+ switch ( the_thread_queue->sync_state ) {
+ case THREAD_QUEUE_SYNCHRONIZED:
+ case THREAD_QUEUE_SATISFIED:
+ _ISR_Enable( level );
+ return NULL;
+
+ case THREAD_QUEUE_NOTHING_HAPPENED:
+ case THREAD_QUEUE_TIMEOUT:
+ the_thread_queue->sync_state = THREAD_QUEUE_SATISFIED;
+ _ISR_Enable( level );
+ return _Thread_Executing;
+ }
+
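+  /*
+   *  The dequeued thread is the head of its priority group.  If other
+   *  threads of the same priority are chained on its Wait.Block2n chain,
+   *  the first of them is spliced into the queue in its place and
+   *  inherits the remainder of the Block2n chain.
+   */
+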
+dequeue:
+ new_first_node = the_thread->Wait.Block2n.first;
+ new_first_thread = (Thread_Control *) new_first_node;
+ next_node = the_thread->Object.Node.next;
+ previous_node = the_thread->Object.Node.previous;
+
+ if ( !_Chain_Is_empty( &the_thread->Wait.Block2n ) ) {
+ last_node = the_thread->Wait.Block2n.last;
+ new_second_node = new_first_node->next;
+
+ previous_node->next = new_first_node;
+ next_node->previous = new_first_node;
+ new_first_node->next = next_node;
+ new_first_node->previous = previous_node;
+
+ if ( !_Chain_Has_only_one_node( &the_thread->Wait.Block2n ) ) {
+ /* > two threads on 2-n */
+ new_second_node->previous =
+ _Chain_Head( &new_first_thread->Wait.Block2n );
+
+ new_first_thread->Wait.Block2n.first = new_second_node;
+ new_first_thread->Wait.Block2n.last = last_node;
+
+ last_node->next = _Chain_Tail( &new_first_thread->Wait.Block2n );
+ }
+ } else {
+ previous_node->next = next_node;
+ next_node->previous = previous_node;
+ }
+
+ if ( !_Watchdog_Is_active( &the_thread->Timer ) ) {
+ _ISR_Enable( level );
+ _Thread_Unblock( the_thread );
+ } else {
+ _Watchdog_Deactivate( &the_thread->Timer );
+ _ISR_Enable( level );
+ (void) _Watchdog_Remove( &the_thread->Timer );
+ _Thread_Unblock( the_thread );
+ }
+
+ if ( !_Objects_Is_local_id( the_thread->Object.id ) )
+ _Thread_MP_Free_proxy( the_thread );
+ return( the_thread );
+}
+
+/*PAGE
+ *
+ * _Thread_queue_Extract_priority
+ *
+ * This routine removes a specific thread from the specified threadq,
+ * deletes any timeout, and unblocks the thread.
+ *
+ * Input parameters:
+ * the_thread_queue - pointer to a threadq header
+ * the_thread - pointer to a thread control block
+ *
+ * Output parameters: NONE
+ *
+ * INTERRUPT LATENCY:
+ * EXTRACT_PRIORITY
+ */
+
+void _Thread_queue_Extract_priority(
+ Thread_queue_Control *the_thread_queue,
+ Thread_Control *the_thread
+)
+{
+ ISR_Level level;
+ Chain_Node *the_node;
+ Chain_Node *next_node;
+ Chain_Node *previous_node;
+ Thread_Control *new_first_thread;
+ Chain_Node *new_first_node;
+ Chain_Node *new_second_node;
+ Chain_Node *last_node;
+
+ the_node = (Chain_Node *) the_thread;
+ _ISR_Disable( level );
+ if ( _States_Is_waiting_on_thread_queue( the_thread->current_state ) ) {
+ next_node = the_node->next;
+ previous_node = the_node->previous;
+
+ if ( !_Chain_Is_empty( &the_thread->Wait.Block2n ) ) {
+ new_first_node = the_thread->Wait.Block2n.first;
+ new_first_thread = (Thread_Control *) new_first_node;
+ last_node = the_thread->Wait.Block2n.last;
+ new_second_node = new_first_node->next;
+
+ previous_node->next = new_first_node;
+ next_node->previous = new_first_node;
+ new_first_node->next = next_node;
+ new_first_node->previous = previous_node;
+
+ if ( !_Chain_Has_only_one_node( &the_thread->Wait.Block2n ) ) {
+ /* > two threads on 2-n */
+ new_second_node->previous =
+ _Chain_Head( &new_first_thread->Wait.Block2n );
+ new_first_thread->Wait.Block2n.first = new_second_node;
+
+ new_first_thread->Wait.Block2n.last = last_node;
+ last_node->next = _Chain_Tail( &new_first_thread->Wait.Block2n );
+ }
+ } else {
+ previous_node->next = next_node;
+ next_node->previous = previous_node;
+ }
+
+ if ( !_Watchdog_Is_active( &the_thread->Timer ) ) {
+ _ISR_Enable( level );
+ _Thread_Unblock( the_thread );
+ } else {
+ _Watchdog_Deactivate( &the_thread->Timer );
+ _ISR_Enable( level );
+ (void) _Watchdog_Remove( &the_thread->Timer );
+ _Thread_Unblock( the_thread );
+ }
+
+ if ( !_Objects_Is_local_id( the_thread->Object.id ) )
+ _Thread_MP_Free_proxy( the_thread );
+ }
+ else
+ _ISR_Enable( level );
+}
+
+/*PAGE
+ *
+ * _Thread_queue_First_priority
+ *
+ *  This routine returns a pointer to the first thread on the
+ * specified threadq.
+ *
+ * Input parameters:
+ * the_thread_queue - pointer to thread queue
+ *
+ * Output parameters:
+ * returns - first thread or NULL
+ */
+
+Thread_Control *_Thread_queue_First_priority (
+ Thread_queue_Control *the_thread_queue
+)
+{
+ unsigned32 index;
+
+ for( index=0 ;
+ index < TASK_QUEUE_DATA_NUMBER_OF_PRIORITY_HEADERS ;
+ index++ ) {
+ if ( !_Chain_Is_empty( &the_thread_queue->Queues.Priority[ index ] ) )
+ return (Thread_Control *)
+ the_thread_queue->Queues.Priority[ index ].first;
+ }
+ return NULL;
+}
diff --git a/c/src/exec/score/src/tod.c b/c/src/exec/score/src/tod.c
new file mode 100644
index 0000000000..1a11034ceb
--- /dev/null
+++ b/c/src/exec/score/src/tod.c
@@ -0,0 +1,235 @@
+/*
+ * Time of Day (TOD) Handler
+ *
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/object.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/tod.h>
+#include <rtems/score/watchdog.h>
+
+/*PAGE
+ *
+ * _TOD_Handler_initialization
+ *
+ * This routine initializes the time of day handler.
+ *
+ * Input parameters:
+ * microseconds_per_tick - microseconds between clock ticks
+ *
+ * Output parameters: NONE
+ */
+
+void _TOD_Handler_initialization(
+ unsigned32 microseconds_per_tick
+)
+{
+ _TOD_Microseconds_per_tick = microseconds_per_tick;
+
+ _TOD_Ticks_since_boot = 0;
+ _TOD_Seconds_since_epoch = 0;
+
+ _TOD_Current.year = TOD_BASE_YEAR;
+ _TOD_Current.month = 1;
+ _TOD_Current.day = 1;
+ _TOD_Current.hour = 0;
+ _TOD_Current.minute = 0;
+ _TOD_Current.second = 0;
+ _TOD_Current.ticks = 0;
+
+ if ( microseconds_per_tick == 0 )
+ _TOD_Ticks_per_second = 0;
+ else
+ _TOD_Ticks_per_second =
+ TOD_MICROSECONDS_PER_SECOND / microseconds_per_tick;
+
+ _Watchdog_Initialize( &_TOD_Seconds_watchdog, _TOD_Tickle, 0, NULL );
+}
+
+/*PAGE
+ *
+ * _TOD_Set
+ *
+ *  This routine sets the current date and time with the specified
+ * new date and time structure.
+ *
+ * Input parameters:
+ * the_tod - pointer to the time and date structure
+ * seconds_since_epoch - seconds since system epoch
+ *
+ * Output parameters: NONE
+ */
+
+void _TOD_Set(
+ TOD_Control *the_tod,
+ Watchdog_Interval seconds_since_epoch
+)
+{
+ Watchdog_Interval ticks_until_next_second;
+
+ _Thread_Disable_dispatch();
+ _TOD_Deactivate();
+
+ if ( seconds_since_epoch < _TOD_Seconds_since_epoch )
+ _Watchdog_Adjust_seconds( WATCHDOG_BACKWARD,
+ _TOD_Seconds_since_epoch - seconds_since_epoch );
+ else
+ _Watchdog_Adjust_seconds( WATCHDOG_FORWARD,
+ seconds_since_epoch - _TOD_Seconds_since_epoch );
+
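+  /*
+   *  Compute the number of ticks remaining in the current second so the
+   *  time of day can be reactivated on the next second boundary.
+   */
+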
+ ticks_until_next_second = _TOD_Ticks_per_second;
+ if ( ticks_until_next_second > _TOD_Current.ticks )
+ ticks_until_next_second -= _TOD_Current.ticks;
+
+ _TOD_Current = *the_tod;
+ _TOD_Seconds_since_epoch = seconds_since_epoch;
+ _TOD_Activate( ticks_until_next_second );
+
+ _Thread_Enable_dispatch();
+}
+
+/*PAGE
+ *
+ * _TOD_Validate
+ *
+ * This kernel routine checks the validity of a date and time structure.
+ *
+ * Input parameters:
+ * the_tod - pointer to a time and date structure
+ *
+ * Output parameters:
+ * TRUE - if the date, time, and tick are valid
+ *    FALSE - if the_tod is invalid
+ *
+ *  NOTE: This routine only works through the year 2099 because it uses
+ *        a simple divide-by-four leap year test.
+ */
+
+boolean _TOD_Validate(
+ TOD_Control *the_tod
+)
+{
+ unsigned32 days_in_month;
+
+ if ((the_tod->ticks >= _TOD_Ticks_per_second) ||
+ (the_tod->second >= TOD_SECONDS_PER_MINUTE) ||
+ (the_tod->minute >= TOD_MINUTES_PER_HOUR) ||
+ (the_tod->hour >= TOD_HOURS_PER_DAY) ||
+ (the_tod->month == 0) ||
+ (the_tod->month > TOD_MONTHS_PER_YEAR) ||
+ (the_tod->year < TOD_BASE_YEAR) ||
+ (the_tod->day == 0) )
+ return FALSE;
+
+ if ( (the_tod->year % 4) == 0 )
+ days_in_month = _TOD_Days_per_month[ 1 ][ the_tod->month ];
+ else
+ days_in_month = _TOD_Days_per_month[ 0 ][ the_tod->month ];
+
+ if ( the_tod->day > days_in_month )
+ return FALSE;
+
+ return TRUE;
+}
+
+/*PAGE
+ *
+ * _TOD_To_seconds
+ *
+ * This routine returns the seconds from the epoch until the
+ * current date and time.
+ *
+ * Input parameters:
+ * the_tod - pointer to the time and date structure
+ *
+ * Output parameters:
+ * returns - seconds since epoch until the_tod
+ */
+
+unsigned32 _TOD_To_seconds(
+ TOD_Control *the_tod
+)
+{
+ unsigned32 time;
+ unsigned32 year_mod_4;
+
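+  /*
+   *  Accumulate the number of whole days since the epoch:  the days
+   *  already elapsed in the current month, the days to the start of the
+   *  month (leap or non-leap table), the whole four year blocks of
+   *  ((TOD_DAYS_PER_YEAR * 4) + 1) days, and the days since the last
+   *  leap year.  The day count is then scaled to seconds and the time
+   *  of day is added.
+   */
+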
+ time = the_tod->day - 1;
+ year_mod_4 = the_tod->year & 3;
+
+ if ( year_mod_4 == 0 )
+ time += _TOD_Days_to_date[ 1 ][ the_tod->month ];
+ else
+ time += _TOD_Days_to_date[ 0 ][ the_tod->month ];
+
+ time += ( (the_tod->year - TOD_BASE_YEAR) / 4 ) *
+ ( (TOD_DAYS_PER_YEAR * 4) + 1);
+
+ time += _TOD_Days_since_last_leap_year[ year_mod_4 ];
+
+ time *= TOD_SECONDS_PER_DAY;
+
+ time += ((the_tod->hour * TOD_MINUTES_PER_HOUR) + the_tod->minute)
+ * TOD_SECONDS_PER_MINUTE;
+
+ time += the_tod->second;
+
+ return( time );
+}
+
+/*PAGE
+ *
+ * _TOD_Tickle
+ *
+ * This routine updates the calendar time and tickles the
+ * per second watchdog timer chain.
+ *
+ * Input parameters:
+ * ignored - this parameter is ignored
+ *
+ * Output parameters: NONE
+ *
+ *  NOTE: This routine only works through the year 2099 because it uses
+ *        a simple divide-by-four leap year test.
+ */
+
+void _TOD_Tickle(
+ Objects_Id id,
+ void *ignored
+)
+{
+ unsigned32 leap;
+
+ _TOD_Current.ticks = 0;
+ ++_TOD_Seconds_since_epoch;
+ if ( ++_TOD_Current.second >= TOD_SECONDS_PER_MINUTE ) {
+ _TOD_Current.second = 0;
+ if ( ++_TOD_Current.minute >= TOD_MINUTES_PER_HOUR ) {
+ _TOD_Current.minute = 0;
+ if ( ++_TOD_Current.hour >= TOD_HOURS_PER_DAY ) {
+ _TOD_Current.hour = 0;
+ if ( _TOD_Current.year & 0x3 ) leap = 0;
+ else leap = 1;
+ if ( ++_TOD_Current.day >
+ _TOD_Days_per_month[ leap ][ _TOD_Current.month ]) {
+ _TOD_Current.day = 1;
+ if ( ++_TOD_Current.month > TOD_MONTHS_PER_YEAR ) {
+ _TOD_Current.month = 1;
+ _TOD_Current.year++;
+ }
+ }
+ }
+ }
+ }
+
+ _Watchdog_Tickle_seconds();
+ _Watchdog_Insert_ticks( &_TOD_Seconds_watchdog, _TOD_Ticks_per_second );
+}
diff --git a/c/src/exec/score/src/userext.c b/c/src/exec/score/src/userext.c
new file mode 100644
index 0000000000..6261220d58
--- /dev/null
+++ b/c/src/exec/score/src/userext.c
@@ -0,0 +1,204 @@
+/*
+ * User Extension Handler
+ *
+ * NOTE: XXX
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/userext.h>
+
+/*PAGE
+ *
+ * _User_extensions_Thread_create
+ */
+
+boolean _User_extensions_Thread_create (
+ Thread_Control *the_thread
+)
+{
+ Chain_Node *the_node;
+ User_extensions_Control *the_extension;
+ boolean status;
+
+ for ( the_node = _User_extensions_List.first ;
+ !_Chain_Is_tail( &_User_extensions_List, the_node ) ;
+ the_node = the_node->next ) {
+
+ the_extension = (User_extensions_Control *) the_node;
+
+ if ( the_extension->Callouts.thread_create != NULL ) {
+ status = (*the_extension->Callouts.thread_create)(
+ _Thread_Executing,
+ the_thread
+ );
+ if ( !status )
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+/*PAGE
+ *
+ * _User_extensions_Thread_delete
+ */
+
+void _User_extensions_Thread_delete (
+ Thread_Control *the_thread
+)
+{
+ Chain_Node *the_node;
+ User_extensions_Control *the_extension;
+
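+  /*
+   *  The delete callouts are run in the reverse order of the create
+   *  callouts, i.e. the extension list is traversed from last to first.
+   */
+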
+ for ( the_node = _User_extensions_List.last ;
+ !_Chain_Is_head( &_User_extensions_List, the_node ) ;
+ the_node = the_node->previous ) {
+
+ the_extension = (User_extensions_Control *) the_node;
+
+ if ( the_extension->Callouts.thread_delete != NULL )
+ (*the_extension->Callouts.thread_delete)(
+ _Thread_Executing,
+ the_thread
+ );
+ }
+}
+
+/*PAGE
+ *
+ * _User_extensions_Thread_start
+ *
+ */
+
+void _User_extensions_Thread_start (
+ Thread_Control *the_thread
+)
+{
+ Chain_Node *the_node;
+ User_extensions_Control *the_extension;
+
+ for ( the_node = _User_extensions_List.first ;
+ !_Chain_Is_tail( &_User_extensions_List, the_node ) ;
+ the_node = the_node->next ) {
+
+ the_extension = (User_extensions_Control *) the_node;
+
+ if ( the_extension->Callouts.thread_start != NULL )
+ (*the_extension->Callouts.thread_start)(
+ _Thread_Executing,
+ the_thread
+ );
+ }
+}
+
+/*PAGE
+ *
+ * _User_extensions_Thread_restart
+ *
+ */
+
+void _User_extensions_Thread_restart (
+ Thread_Control *the_thread
+)
+{
+ Chain_Node *the_node;
+ User_extensions_Control *the_extension;
+
+ for ( the_node = _User_extensions_List.first ;
+ !_Chain_Is_tail( &_User_extensions_List, the_node ) ;
+ the_node = the_node->next ) {
+
+ the_extension = (User_extensions_Control *) the_node;
+
+ if ( the_extension->Callouts.thread_restart != NULL )
+ (*the_extension->Callouts.thread_restart)(
+ _Thread_Executing,
+ the_thread
+ );
+ }
+}
+
+/*PAGE
+ *
+ * _User_extensions_Thread_begin
+ *
+ */
+
+void _User_extensions_Thread_begin (
+ Thread_Control *executing
+)
+{
+ Chain_Node *the_node;
+ User_extensions_Control *the_extension;
+
+ for ( the_node = _User_extensions_List.first ;
+ !_Chain_Is_tail( &_User_extensions_List, the_node ) ;
+ the_node = the_node->next ) {
+
+ the_extension = (User_extensions_Control *) the_node;
+
+ if ( the_extension->Callouts.thread_begin != NULL )
+ (*the_extension->Callouts.thread_begin)( executing );
+ }
+}
+
+/*PAGE
+ *
+ * _User_extensions_Thread_exitted
+ */
+
+void _User_extensions_Thread_exitted (
+ Thread_Control *executing
+)
+{
+ Chain_Node *the_node;
+ User_extensions_Control *the_extension;
+
+ for ( the_node = _User_extensions_List.last ;
+ !_Chain_Is_head( &_User_extensions_List, the_node ) ;
+ the_node = the_node->previous ) {
+
+ the_extension = (User_extensions_Control *) the_node;
+
+ if ( the_extension->Callouts.thread_exitted != NULL )
+ (*the_extension->Callouts.thread_exitted)( executing );
+ }
+}
+
+/*PAGE
+ *
+ * _User_extensions_Fatal
+ */
+
+void _User_extensions_Fatal (
+ Internal_errors_Source the_source,
+ boolean is_internal,
+ unsigned32 the_error
+)
+{
+ Chain_Node *the_node;
+ User_extensions_Control *the_extension;
+
+ for ( the_node = _User_extensions_List.last ;
+ !_Chain_Is_head( &_User_extensions_List, the_node ) ;
+ the_node = the_node->previous ) {
+
+ the_extension = (User_extensions_Control *) the_node;
+
+ if ( the_extension->Callouts.fatal != NULL )
+ (*the_extension->Callouts.fatal)( the_source, is_internal, the_error );
+ }
+}
+
+
diff --git a/c/src/exec/score/src/watchdog.c b/c/src/exec/score/src/watchdog.c
new file mode 100644
index 0000000000..0f72bd3d5c
--- /dev/null
+++ b/c/src/exec/score/src/watchdog.c
@@ -0,0 +1,267 @@
+/*
+ * Watchdog Handler
+ *
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/isr.h>
+#include <rtems/score/watchdog.h>
+
+/*PAGE
+ *
+ * _Watchdog_Handler_initialization
+ *
+ * This routine initializes the watchdog handler.
+ *
+ * Input parameters: NONE
+ *
+ * Output parameters: NONE
+ */
+
+void _Watchdog_Handler_initialization( void )
+{
+ _Watchdog_Sync_count = 0;
+ _Watchdog_Sync_level = 0;
+ _Chain_Initialize_empty( &_Watchdog_Ticks_chain );
+ _Chain_Initialize_empty( &_Watchdog_Seconds_chain );
+}
+
+/*PAGE
+ *
+ * _Watchdog_Remove
+ *
+ *  This routine removes a watchdog from a delta chain and updates
+ * the delta counters of the remaining watchdogs.
+ */
+
+Watchdog_States _Watchdog_Remove(
+ Watchdog_Control *the_watchdog
+)
+{
+ ISR_Level level;
+ Watchdog_States previous_state;
+ Watchdog_Control *next_watchdog;
+
+ _ISR_Disable( level );
+ previous_state = the_watchdog->state;
+ switch ( previous_state ) {
+ case WATCHDOG_INACTIVE:
+ break;
+
+ case WATCHDOG_BEING_INSERTED:
+
+ /*
+ * It is not actually on the chain so just change the state and
+ * the Insert operation we interrupted will be aborted.
+ */
+ the_watchdog->state = WATCHDOG_INACTIVE;
+ break;
+
+ case WATCHDOG_ACTIVE:
+ case WATCHDOG_REMOVE_IT:
+
+ the_watchdog->state = WATCHDOG_INACTIVE;
+ next_watchdog = _Watchdog_Next( the_watchdog );
+
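+      /*
+       *  Each delta_interval is relative to the preceding watchdog on the
+       *  chain, so the removed watchdog's interval is folded into its
+       *  successor (unless the successor is the chain tail) to keep the
+       *  remaining expiration times unchanged.
+       */
+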
+ if ( _Watchdog_Next(next_watchdog) )
+ next_watchdog->delta_interval += the_watchdog->delta_interval;
+
+ if ( _Watchdog_Sync_count )
+ _Watchdog_Sync_level = _ISR_Nest_level;
+
+ _Chain_Extract_unprotected( &the_watchdog->Node );
+ break;
+ }
+ _ISR_Enable( level );
+ return( previous_state );
+}
+
+/*PAGE
+ *
+ * _Watchdog_Adjust
+ *
+ * This routine adjusts the delta chain backward or forward in response
+ * to a time change.
+ *
+ * Input parameters:
+ * header - pointer to the delta chain to be adjusted
+ * direction - forward or backward adjustment to delta chain
+ * units - units to adjust
+ *
+ *  Output parameters: NONE
+ */
+
+void _Watchdog_Adjust(
+ Chain_Control *header,
+ Watchdog_Adjust_directions direction,
+ Watchdog_Interval units
+)
+{
+ if ( !_Chain_Is_empty( header ) ) {
+ switch ( direction ) {
+ case WATCHDOG_BACKWARD:
+ _Watchdog_First( header )->delta_interval += units;
+ break;
+ case WATCHDOG_FORWARD:
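+        /*
+         *  Consume whole delta intervals, tickling each watchdog which
+         *  expires as a result.  For example, adjusting a chain with
+         *  delta intervals of 3, 5, and 7 forward by 10 units fires the
+         *  first two watchdogs and leaves the third with a delta of 5.
+         */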
+ while ( units ) {
+ if ( units < _Watchdog_First( header )->delta_interval ) {
+ _Watchdog_First( header )->delta_interval -= units;
+ break;
+ } else {
+ units -= _Watchdog_First( header )->delta_interval;
+ _Watchdog_First( header )->delta_interval = 1;
+ _Watchdog_Tickle( header );
+ if ( _Chain_Is_empty( header ) )
+ break;
+ }
+ }
+ break;
+ }
+ }
+}
+
+/*PAGE
+ *
+ * _Watchdog_Insert
+ *
+ * This routine inserts a watchdog timer on to the appropriate delta
+ * chain while updating the delta interval counters.
+ */
+
+void _Watchdog_Insert(
+ Chain_Control *header,
+ Watchdog_Control *the_watchdog
+)
+{
+ ISR_Level level;
+ Watchdog_Control *after;
+ unsigned32 insert_isr_nest_level;
+ Watchdog_Interval delta_interval;
+
+
+ insert_isr_nest_level = _ISR_Nest_level;
+ the_watchdog->state = WATCHDOG_BEING_INSERTED;
+
+ _Watchdog_Sync_count++;
+restart:
+ delta_interval = the_watchdog->initial;
+
+ _ISR_Disable( level );
+
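+  /*
+   *  Walk the delta chain subtracting each predecessor's delta interval
+   *  from the requested interval.  The remainder becomes this watchdog's
+   *  delta_interval and the successor's interval is reduced accordingly.
+   */
+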
+ for ( after = _Watchdog_First( header ) ;
+ ;
+ after = _Watchdog_Next( after ) ) {
+
+ if ( delta_interval == 0 || !_Watchdog_Next( after ) )
+ break;
+
+ if ( delta_interval < after->delta_interval ) {
+ after->delta_interval -= delta_interval;
+ break;
+ }
+
+ delta_interval -= after->delta_interval;
+
+ /*
+     *  If you experience problems, comment out the _ISR_Flash line.
+ * 3.2.0 was the first release with this critical section redesigned.
+ * Under certain circumstances, the PREVIOUS critical section algorithm
+ * used around this flash point allowed interrupts to execute
+ * which violated the design assumptions. The critical section
+ * mechanism used here WAS redesigned to address this.
+ */
+
+ _ISR_Flash( level );
+
+ if ( the_watchdog->state != WATCHDOG_BEING_INSERTED ) {
+ goto exit_insert;
+ }
+
+ if ( _Watchdog_Sync_level > insert_isr_nest_level ) {
+ _Watchdog_Sync_level = insert_isr_nest_level;
+ _ISR_Enable( level );
+ goto restart;
+ }
+ }
+
+ _Watchdog_Activate( the_watchdog );
+
+ the_watchdog->delta_interval = delta_interval;
+
+ _Chain_Insert_unprotected( after->Node.previous, &the_watchdog->Node );
+
+exit_insert:
+ _Watchdog_Sync_level = insert_isr_nest_level;
+ _Watchdog_Sync_count--;
+ _ISR_Enable( level );
+}
+
+/*PAGE
+ *
+ * _Watchdog_Tickle
+ *
+ * This routine decrements the delta counter in response to a tick. The
+ * delta chain is updated accordingly.
+ *
+ * Input parameters:
+ * header - pointer to the delta chain to be tickled
+ *
+ * Output parameters: NONE
+ */
+
+void _Watchdog_Tickle(
+ Chain_Control *header
+)
+{
+ Watchdog_Control *the_watchdog;
+
+ if ( _Chain_Is_empty( header ) )
+ return;
+
+ the_watchdog = _Watchdog_First( header );
+ the_watchdog->delta_interval--;
+ if ( the_watchdog->delta_interval != 0 )
+ return;
+
+ do {
+ switch( _Watchdog_Remove( the_watchdog ) ) {
+ case WATCHDOG_ACTIVE:
+ (*the_watchdog->routine)(
+ the_watchdog->id,
+ the_watchdog->user_data
+ );
+ break;
+
+ case WATCHDOG_INACTIVE:
+ /*
+ * This state indicates that the watchdog is not on any chain.
+ * Thus, it is NOT on a chain being tickled. This case should
+ * never occur.
+ */
+ break;
+
+ case WATCHDOG_BEING_INSERTED:
+ /*
+ * This state indicates that the watchdog is in the process of
+ * BEING inserted on the chain. Thus, it can NOT be on a chain
+ * being tickled. This case should never occur.
+ */
+ break;
+
+ case WATCHDOG_REMOVE_IT:
+ break;
+ }
+ the_watchdog = _Watchdog_First( header );
+ } while ( !_Chain_Is_empty( header ) &&
+ (the_watchdog->delta_interval == 0) );
+}
diff --git a/c/src/exec/score/src/wkspace.c b/c/src/exec/score/src/wkspace.c
new file mode 100644
index 0000000000..1dda9408bb
--- /dev/null
+++ b/c/src/exec/score/src/wkspace.c
@@ -0,0 +1,88 @@
+/*
+ * Workspace Handler
+ *
+ * XXX
+ *
+ * NOTE:
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/wkspace.h>
+#include <rtems/score/interr.h>
+
+/*PAGE
+ *
+ * _Workspace_Handler_initialization
+ */
+
+void _Workspace_Handler_initialization(
+ void *starting_address,
+ unsigned32 size
+)
+{
+ unsigned32 *zero_out_array;
+ unsigned32 index;
+ unsigned32 memory_available;
+
+ if ( !starting_address || !_Addresses_Is_aligned( starting_address ) )
+ _Internal_error_Occurred(
+ INTERNAL_ERROR_CORE,
+ TRUE,
+ INTERNAL_ERROR_INVALID_WORKSPACE_ADDRESS
+ );
+
+ if ( _CPU_Table.do_zero_of_workspace ) {
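+    /*
+     *  Zero the workspace one unsigned32 (four bytes) at a time when the
+     *  CPU table indicates it should be cleared.
+     */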
+ for( zero_out_array = (unsigned32 *) starting_address, index = 0 ;
+ index < size / 4 ;
+ index++ )
+ zero_out_array[ index ] = 0;
+ }
+
+ memory_available = _Heap_Initialize(
+ &_Workspace_Area,
+ starting_address,
+ size,
+ CPU_HEAP_ALIGNMENT
+ );
+
+ if ( memory_available == 0 )
+ _Internal_error_Occurred(
+ INTERNAL_ERROR_CORE,
+ TRUE,
+ INTERNAL_ERROR_TOO_LITTLE_WORKSPACE
+ );
+}
+
+/*PAGE
+ *
+ * _Workspace_Allocate_or_fatal_error
+ *
+ */
+
+void *_Workspace_Allocate_or_fatal_error(
+ unsigned32 size
+)
+{
+ void *memory;
+
+ memory = _Workspace_Allocate( size );
+
+ if ( memory == NULL )
+ _Internal_error_Occurred(
+ INTERNAL_ERROR_CORE,
+ TRUE,
+ INTERNAL_ERROR_WORKSPACE_ALLOCATION
+ );
+
+ return memory;
+}