//                              -*- Mode: C++ -*- 
// 
// uC++ Version 5.0.1, Copyright (C) Peter A. Buhr 1994
// 
// uC++.cc -- 
// 
// Author           : Peter Buhr
// Created On       : Fri Dec 17 22:10:52 1993
// Last Modified By : Peter A. Buhr
// Last Modified On : Thu Sep  2 12:28:41 2004
// Update Count     : 1893
//
// This  library is free  software; you  can redistribute  it and/or  modify it
// under the terms of the GNU Lesser General Public License as published by the
// Free Software  Foundation; either  version 2.1 of  the License, or  (at your
// option) any later version.
// 
// This library is distributed in the  hope that it will be useful, but WITHOUT
// ANY  WARRANTY;  without even  the  implied  warranty  of MERCHANTABILITY  or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License
// for more details.
// 
// You should  have received a  copy of the  GNU Lesser General  Public License
// along  with this library.
// 


#define __U_KERNEL__
#include <uC++.h>
#include <uProfiler.h>
#include <uHeapLmmm.h>
#include <uBootTask.h>
#include <uSystemTask.h>
//#include <uDebug.h>

#include <exception>
#include <dlfcn.h>
#include <cstdio>
#include <unistd.h>                             // _exit

#if defined( __solaris__ )
#include <sys/lwp.h>

#elif defined( __linux__ )
#if defined( __i386__ )
#include <asm/unistd.h>                         // for _syscall3, __NR_modify_ldt

#ifndef _syscall3                           // fedora change to account for moved definition of _syscall3
#include <linux/unistd.h>
#endif

#include <asm/ldt.h>                            // for struct modify_ldt_ldt_s
#include <linux/version.h>                      // for KERNEL_VERSION

// modify_ldt has no prototype in the headers
// this is copied from the man page
_syscall3( int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount );

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,40)
#define modify_ldt_ldt_s user_desc              // name change
#endif

#define REGPARM __attribute__(( regparm(2) ))

#elif defined( __ia64__ )

#define REGPARM

#endif

extern "C" void *_dl_deallocate_tls( void *mem, bool dealloc_tcb ) REGPARM;

#elif defined( __irix__ )
#include <ulocks.h>                             // for usconfig
#include <sys/wait.h>                           // for waitpid

#else
    #error uC++ internal error : unsupported architecture
#endif

bool              uKernelModule::uKernelModuleInitialized = false;
bool              uKernelModule::uInitialization          = false;
bool              uKernelModule::uCoreDumped              = false;
#ifndef __U_MULTI__
bool              uKernelModule::uDeadlock                = false;
#endif // ! __U_MULTI__
bool              uKernelModule::uGlobalAbort             = false;
bool              uKernelModule::uGlobalSpinAbort         = false;
uSpinLock         *uKernelModule::uGlobalAbortLock        = NULL;
uSpinLock         *uKernelModule::uGlobalProcessorLock    = NULL;
uSpinLock         *uKernelModule::uGlobalClusterLock      = NULL;
uDefaultScheduler *uKernelModule::uSystemScheduler        = NULL;
uCluster          *uKernelModule::uSystemCluster          = NULL;
uProcessor        *uKernelModule::uSystemProcessor        = NULL;
uBootTask         *uKernelModule::uTaskBoot               = (uBootTask *)&uTaskBootStorage;
uSystemTask       *uKernelModule::uTaskSystem             = NULL;
uProcessor        *uKernelModule::uUserProcessor          = NULL;
uCluster          *uKernelModule::uUserCluster            = NULL;

unsigned int      uKernelModule::uAttaching               = 0; // debugging

char              uKernelModule::uSystemProcessorStorage[sizeof(uProcessor)] __attribute__(( aligned (16) )) = {0};
char              uKernelModule::uSystemClusterStorage[sizeof(uCluster)] __attribute__(( aligned (16) )) = {0};
char              uKernelModule::uTaskBootStorage[sizeof(uBootTask)] __attribute__(( aligned (16) )) = {0};

// Fake uKernelModule used before uKernelBoot::startup.
volatile __U_THREAD__ uKernelModule uKernelModule::uKernelModuleBoot;

uProcessorSeq     *uKernelModule::uGlobalProcessors       = NULL;
uClusterSeq       *uKernelModule::uGlobalClusters         = NULL;

#ifdef __U_FLOATINGPOINTDATASIZE__
int         uFloatingPointContext::uUniqueKey   = 0;
#endif // __U_FLOATINGPOINTDATASIZE__

#define __U_TIMEOUTPOSN__ 0                     // bit 0 is reserved for timeout
#define __U_DESTRUCTORPOSN__ 1                        // bit 1 is reserved for destructor

#ifndef __U_MULTI__
uNBIO       *uCluster::NBIO                     = NULL;
#endif // ! __U_MULTI__

int         uKernelBoot::uCount                 = 0;


extern "C" void _pthread_deletespecific( void * );    // see pthread simulation


//######################### main #########################


// Declare a variable in shared memory to obtain a return code from the user
// program.  Unless modified, the return code is zero.

int uRetCode = 0;


// The main routine that starts the first task with the OS-supplied arguments,
// waits for its completion, and returns the result code to the OS.  Defined
// in this translation unit so it cannot be replaced by a user.

#if defined( __irix__ )
extern "C" void __do_global_dtors();
extern "C" void _exithandle();
extern "C" void _cleanup();
#endif // __irix__

int main( int argc, char *argv[] ) {
    {
      uMain uUserMain( *uKernelModule::uUserCluster, uMainStackSize(), argc, argv, uRetCode );
    }

    // Return the program return code to the operating system.

#if defined( __irix__ )
    // SKULLDUGGERY: irix does not run global destructors until all sprocs
    // have terminated, but uC++ depends on destructors to terminate sprocs.
    // So, invoke the destructor sequence by hand here, and then _exit to
    // prevent the destructors from being run twice.  Because _exit stops
    // execution, all of the regular cleanups must also be done by hand.

    _exithandle();                              // atexit functions
    _cleanup();                                 // close files
    __do_global_dtors();                        // global destructors
    _exit( uRetCode );                          // stop without rerunning the global destructors
#endif // __irix__

    return uRetCode;
} // main
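
// A user program provides uMain::main; assigning to its uRetCode member (a
// reference bound to the shared variable above via the constructor call in
// main) sets the program's exit status.  A minimal sketch, assuming uMain
// exposes that reference under the same name (guarded out, not compiled here):
#if 0
void uMain::main() {
    // ... user code ...
    uRetCode = 0;                               // value returned to the OS by ::main above
} // uMain::main
#endif // usage sketch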


//######################### uSpinLock #########################


extern "C" int uAtomic( unsigned int *lock ) asm("uAtomic"); // assembler routine that performs atomic read/write


void *uSpinLock::operator new( size_t, void *storage ) {
    return storage;
} // uSpinLock::operator new

void *uSpinLock::operator new( size_t size ) {
    return ::operator new( size );
} // uSpinLock::operator new


uSpinLock::uSpinLock() {
    uValue = 0;                                 // unlock
} // uSpinLock::uSpinLock


void uSpinLock::acquire() {
    // No race condition exists for accessing uDisableIntSpin in the
    // multiprocessor case because this variable is private to each UNIX
    // process. Also, the spin lock must be acquired after adjusting
    // uDisableIntSpin because the time slicing must see the attempt to access
    // the lock first to prevent live-lock on the same processor.  For example,
    // one task acquires the ready queue lock, a time slice occurs, and it does
    // not appear that the current task is in the kernel because
    // uDisableIntSpin is not set so the signal handler tries to yield.
    // However, the ready queue lock is held so the yield live-locks. There is
    // a similar situation on releasing the lock.

#ifdef __U_DEBUG__
#ifndef __U_MULTI__
    if ( uValue != 0 ) {                        // locked ?
      uAbort( "(uSpinLock &)0x%p.acquire() : internal error, attempt to multiply acquire spin lock by same task.", this );
    } // if
#endif // __U_MULTI__
#endif // __U_DEBUG__

    THREAD_GETMEM( uSelf )->uDisableIntSpinLock();

#ifdef __U_MULTI__
    if ( uAtomic( &uValue ) != 0 ) {                  // test and set, uAtomic returns 0 or non-zero

#if 0
        THREAD_GETMEM( uSelf )->uDisableInterrupts();
      if ( uThisTask().uProfileActive && uProfiler::uProfiler_BuiltInRegisterTaskStartSpin ) {
          (*uProfiler::uProfiler_BuiltInRegisterTaskStartSpin)( uProfiler::uProfilerInstance, uThisTask() );
      } // if
      THREAD_GETMEM( uSelf )->uEnableInterrupts();
#endif

      int spin = 1;
      for ( ;; ) {                              // poll for lock
          THREAD_GETMEM( uSelf )->uEnableIntSpinLock();
          for ( int i = 0; i < spin; i += 1 ) { // exponential spin
            if ( uKernelModule::uGlobalSpinAbort ) {
                  _exit( -1 );                  // close down in progress, shutdown immediately!
            } // if
          } // for
          spin += spin;                   // powers of 2
          if ( spin > 65536 ) spin = 1;         // prevent overflow
          THREAD_GETMEM( uSelf )->uDisableIntSpinLock();
        if ( uAtomic( &uValue ) == 0 ) break;         // test and set, uAtomic returns 0 or non-zero
      } // for

#if 0
        THREAD_GETMEM( uSelf )->uDisableInterrupts();
      if ( uThisTask().uProfileActive && uProfiler::uProfiler_BuiltInRegisterTaskStopSpin ) {
          (*uProfiler::uProfiler_BuiltInRegisterTaskStopSpin)( uProfiler::uProfilerInstance, uThisTask() );
      } // if
      THREAD_GETMEM( uSelf )->uEnableInterrupts();
#endif
    } // if
#if defined( __alpha__ )
    asm( "mb" );                          // flush the cache
#endif // __alpha__

#else
    uValue = 1;                                 // lock
#endif // __U_MULTI__
} // uSpinLock::acquire
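
// The polling loop above backs off exponentially: the spin count doubles on
// each failed attempt and resets once it exceeds 65536.  A minimal sketch of
// that schedule in isolation, using only the uAtomic primitive declared above
// (guarded out, not compiled into the kernel):
#if 0
static void spinWithBackoff( unsigned int *lock ) {
    if ( uAtomic( lock ) == 0 ) return;         // test and set, 0 => acquired
    int spin = 1;
    for ( ;; ) {                                // poll for lock
        for ( int i = 0; i < spin; i += 1 ) {}  // busy-wait "spin" iterations
        spin += spin;                           // powers of 2
        if ( spin > 65536 ) spin = 1;           // prevent overflow
        if ( uAtomic( lock ) == 0 ) break;      // try again
    } // for
} // spinWithBackoff
#endif // usage sketch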

// Same as acquire, except it calls uKernelModule::uEnableIntSpinLockNoRF()
void uSpinLock::uAcquireNoRF() {
    // No race condition exists for accessing uDisableIntSpin in the
    // multiprocessor case because this variable is private to each UNIX
    // process. Also, the spin lock must be acquired after adjusting
    // uDisableIntSpin because the time slicing must see the attempt to access
    // the lock first to prevent live-lock on the same processor.  For example,
    // one task acquires the ready queue lock, a time slice occurs, and it does
    // not appear that the current task is in the kernel because
    // uDisableIntSpin is not set so the signal handler tries to yield.
    // However, the ready queue lock is held so the yield live-locks. There is
    // a similar situation on releasing the lock.

#ifdef __U_DEBUG__
#ifndef __U_MULTI__
    if ( uValue != 0 ) {                        // locked ?
      uAbort( "(uSpinLock &)0x%p.uAcquire() : internal error, attempt to multiply acquire spin lock by same task.", this );
    } // if
#endif // __U_MULTI__
#endif // __U_DEBUG__

    THREAD_GETMEM( uSelf )->uDisableIntSpinLock();

#ifdef __U_MULTI__
    if ( uAtomic( &uValue ) != 0 ) {                  // test and set, uAtomic returns 0 or non-zero

#if 0
      THREAD_GETMEM( uSelf )->uDisableInterrupts();
      if ( uThisTask().uProfileActive && uProfiler::uProfiler_BuiltInRegisterTaskStartSpin ) {
          (*uProfiler::uProfiler_BuiltInRegisterTaskStartSpin)( uProfiler::uProfilerInstance, uThisTask() );
      } // if
      THREAD_GETMEM( uSelf )->uEnableInterrupts();
#endif

      int spin = 1;
      for ( ;; ) {                              // poll for lock
          THREAD_GETMEM( uSelf )->uEnableIntSpinLockNoRF();
          for ( int i = 0; i < spin; i += 1 ) { // exponential spin
            if ( uKernelModule::uGlobalSpinAbort ) {
                  _exit( -1 );                  // close down in progress, shutdown immediately!
            } // if
          } // for
          spin += spin;                   // powers of 2
          if ( spin > 65536 ) spin = 1;         // prevent overflow
          THREAD_GETMEM( uSelf )->uDisableIntSpinLock();
        if ( uAtomic( &uValue ) == 0 ) break;         // test and set, uAtomic returns 0 or non-zero
      } // for

#if 0
      THREAD_GETMEM( uSelf )->uDisableInterrupts();
      if ( uThisTask().uProfileActive && uProfiler::uProfiler_BuiltInRegisterTaskStopSpin ) {
          (*uProfiler::uProfiler_BuiltInRegisterTaskStopSpin)( uProfiler::uProfilerInstance, uThisTask() );
      } // if
      THREAD_GETMEM( uSelf )->uEnableInterrupts();
#endif
    } // if
#if defined( __alpha__ )
    asm( "mb" );                          // flush the cache
#endif // __alpha__

#else
    uValue = 1;                                 // lock
#endif // __U_MULTI__
} // uSpinLock::uAcquireNoRF

bool uSpinLock::tryacquire() {
#ifdef __U_DEBUG__
#ifndef __U_MULTI__
    if ( uValue != 0 ) {                        // locked ?
      uAbort( "(uSpinLock &)0x%p.tryacquire() : internal error, attempt to multiply acquire spin lock by same task.", this );
    } // if
#endif // __U_MULTI__
#endif // __U_DEBUG__

    THREAD_GETMEM( uSelf )->uDisableIntSpinLock();

#ifdef __U_MULTI__
    if ( uAtomic( &uValue ) == 0 ) {                  // get the lock ?
#if defined( __alpha__ )
      asm( "mb" );                              // flush the cache
#endif // __alpha__
      return true;
    } else {
      THREAD_GETMEM( uSelf )->uEnableIntSpinLock();
      return false;
    } // if
#else
    uValue = 1;                                 // lock
    return true;
#endif // __U_MULTI__
} // uSpinLock::tryacquire

void uSpinLock::release() {
#if defined( __alpha__ )
    asm( "mb" );                          // flush the cache
#endif // __alpha__
    uAssert( uValue != 0 );
#if defined( __ia64__ )
    asm( "st4.rel %0 = r0" : "=m" (uValue) );
#else
    uValue = 0;                                 // unlock
#endif // __ia64__
#if defined( __sparc__ )
    asm( "membar #StoreLoad" );                       // flush the cache
#endif // __sparc__
    THREAD_GETMEM( uSelf )->uEnableIntSpinLock();
} // uSpinLock::release

// Same as release, except it calls uKernelModule::uEnableIntSpinLockNoRF()
void uSpinLock::uReleaseNoRF() {
#if defined( __alpha__ )
    asm( "mb" );                          // flush the cache
#endif // __alpha__
    uAssert( uValue != 0 );
#if defined( __ia64__ )
    asm( "st4.rel %0 = r0" : "=m" (uValue) );
#else
    uValue = 0;                                 // unlock
#endif // __ia64__
#if defined( __sparc__ )
    asm( "membar #StoreLoad" );                       // flush the cache
#endif // __sparc__
    THREAD_GETMEM( uSelf )->uEnableIntSpinLockNoRF();
} // uSpinLock::uReleaseNoRF


uCSpinLock::uCSpinLock( uSpinLock &SpinLock ) : SpinLock( SpinLock ) {
    SpinLock.acquire();
} // uCSpinLock::uCSpinLock

uCSpinLock::~uCSpinLock() {
    SpinLock.release();
} // uCSpinLock::~uCSpinLock
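
// uCSpinLock acquires the argument spin lock in its constructor and releases
// it in its destructor, giving scoped protection for short critical sections.
// A minimal usage sketch with hypothetical names (guarded out, not compiled
// into the kernel):
#if 0
static uSpinLock counterLock;
static int counter = 0;

static void increment() {
    uCSpinLock guard( counterLock );            // acquire; released when guard is destroyed
    counter += 1;                               // short critical section
} // increment
#endif // usage sketch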


//######################### uLock #########################


void *uLock::operator new( size_t, void *storage ) {
    return storage;
} // uLock::operator new

void *uLock::operator new( size_t size ) {
    return ::operator new( size );
} // uLock::operator new

uLock::uLock() {
    uValue = 1;
} // uLock::uLock

uLock::uLock( unsigned int value ) {
#ifdef __U_DEBUG__
    if ( value > 1 ) {
      uAbort( ": attempt to initialize uLock 0x%p to %d, which exceeds range 0-1.", this, value );
    } // if
#endif // __U_DEBUG__
    uValue = value;
} // uLock::uLock

void uLock::acquire() {
    for ( ;; ) {
      SpinLock.acquire();
      if ( uValue == 1 ) break;
      SpinLock.release();
      uThisTask().uYield();
    } // for
    uValue = 0;
    SpinLock.release();
} // uLock::acquire

bool uLock::tryacquire() {
    SpinLock.acquire();
    if ( uValue == 1 ) {
      uValue = 0;
      SpinLock.release();
      return true;
    } else {
      SpinLock.release();
      return false;
    } // if
} // uLock::tryacquire

void uLock::release() {
    uValue = 1;
} // uLock::release
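
// uLock spins briefly and yields between attempts, so it tolerates longer
// critical sections than a bare spin lock; it starts open (1) by default or
// can be constructed closed (0).  A minimal usage sketch with hypothetical
// names (guarded out, not compiled into the kernel):
#if 0
static uLock resourceLock( 1 );                 // start open

static void useResource() {
    if ( ! resourceLock.tryacquire() ) {        // cheap non-blocking attempt
        resourceLock.acquire();                 // otherwise spin/yield until open
    } // if
    // ... critical section ...
    resourceLock.release();
} // useResource
#endif // usage sketch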


//######################### uOwnerLock #########################


void *uOwnerLock::operator new( size_t, void *storage ) {
    return storage;
} // uOwnerLock::operator new

void *uOwnerLock::operator new( size_t size ) {
    return ::operator new( size );
} // uOwnerLock::operator new

void uOwnerLock::_add( uBaseTask &t ) {               // used by uCondLock::signal
    spin.acquire();
    if ( _owner != NULL ) {                     // lock in use ?
      waiting.uAddTail( &(t.uEntryRef) );       // move task to owner lock list
    } else {
      _owner = &t;                              // become owner
      count = 1;
      t.uWake();                          // restart new owner
    } // if
    spin.release();
} // uOwnerLock::_add

void uOwnerLock::_release() {                   // used by uCondLock::wait
    spin.acquire();
    if ( ! waiting.uEmpty() ) {                       // waiting tasks ?
      _owner = &(waiting.uDropHead()->uGet());  // remove task at head of waiting list and make new owner
      count = 1;
      _owner->uWake();                    // restart new owner
    } else {
      _owner = NULL;                            // release, no owner
      count = 0;
    } // if
    spin.release();
} // uOwnerLock::_release

uOwnerLock::uOwnerLock() {
    _owner = NULL;                              // no one owns the lock
    count = 0;                                  // so count is zero
} // uOwnerLock::uOwnerLock

#ifdef __U_DEBUG__
uOwnerLock::~uOwnerLock() {
    spin.acquire();
    if ( ! waiting.uEmpty() ) {
      uBaseTask *t = &(waiting.uHead()->uGet());      // waiting list could change as soon as spin lock released
      spin.release();
      uAbort( ": attempt to delete owner lock with task %.256s (0x%p) still on it.", t->uGetName(), t );
    } // if
    spin.release();
} // uOwnerLock::~uOwnerLock
#endif // __U_DEBUG__

unsigned int uOwnerLock::times() const {
    return count;
} // uOwnerLock::times

uBaseTask *uOwnerLock::owner() const {
    return _owner;
} // uOwnerLock::owner

void uOwnerLock::acquire() {
    uBaseTask &t = uThisTask();                       // optimization

    if ( uKernelModule::uInitialization ) uAssert( ! THREAD_GETMEM( uDisableInt ) && THREAD_GETMEM( uDisableIntCnt ) == 0 );
    spin.acquire();
    if ( _owner != &t ) {                       // don't own lock yet
      if ( _owner != NULL ) {                   // but if lock in use
          waiting.uAddTail( &(t.uEntryRef) );         // suspend current task
          uSCHEDULE( &spin );                   // atomically release owner spin lock and block
          // _owner and count set in release
          return;
      } else {
          _owner = &t;                    // become owner
          count = 1;
      } // if
    } else {
      count += 1;                         // remember how often
    } // if
    spin.release();
} // uOwnerLock::acquire

bool uOwnerLock::tryacquire() {
    uBaseTask &t = uThisTask();                       // optimization

    if ( uKernelModule::uInitialization ) uAssert( ! THREAD_GETMEM( uDisableInt ) && THREAD_GETMEM( uDisableIntCnt ) == 0 );
    spin.acquire();
    if ( _owner != &t ) {                       // don't own lock yet
      if ( _owner != NULL ) {                   // but if lock in use
          spin.release();
          return false;                   // don't wait for the lock
      } // if
      _owner = &t;                              // become owner
      count = 1;
    } else {
      count += 1;                         // remember how often
    } // if
    spin.release();
    return true;
} // uOwnerLock::tryacquire

void uOwnerLock::release() {
    if ( uKernelModule::uInitialization ) uAssert( ! THREAD_GETMEM( uDisableInt ) && THREAD_GETMEM( uDisableIntCnt ) == 0 );
    spin.acquire();
#ifdef __U_DEBUG__
    if ( _owner != &uThisTask() ) {
      uBaseTask *t = _owner;                    // owner could change as soon as spin lock released
      spin.release();
      uAbort( ": attempt to release owner lock, which is currently owned by task %.256s (0x%p).", t->uGetName(), t );
    } // if
#endif // __U_DEBUG__
    count -= 1;                                 // release the lock
    if ( count == 0 ) {                         // if this is the last
      if ( ! waiting.uEmpty() ) {               // waiting tasks ?
          _owner = &(waiting.uDropHead()->uGet());    // remove task at head of waiting list and make new owner
          count = 1;
          _owner->uWake();                      // restart new owner
      } else {
          _owner = NULL;                        // release, no owner
      } // if
    } // if
    spin.release();
} // uOwnerLock::release
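
// uOwnerLock is recursive: the owning task may acquire it repeatedly and must
// release it the same number of times before another task can become owner.
// A minimal usage sketch with hypothetical names (guarded out, not compiled
// into the kernel):
#if 0
static uOwnerLock mutex;

static void inner() {
    mutex.acquire();                            // nested acquire by the owner; count becomes 2
    // ...
    mutex.release();                            // count back to 1, lock still owned
} // inner

static void outer() {
    mutex.acquire();                            // count == 1, this task becomes owner
    inner();
    mutex.release();                            // count == 0, lock released
} // outer
#endif // usage sketch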


//######################### uCondLock #########################


void *uCondLock::operator new( size_t, void *storage ) {
    return storage;
} // uCondLock::operator new

void *uCondLock::operator new( size_t size ) {
    return ::operator new( size );
} // uCondLock::operator new

uCondLock::uCondLock() {
} // uCondLock::uCondLock

#ifdef __U_DEBUG__
uCondLock::~uCondLock() {
    spin.acquire();
    if ( ! waiting.uEmpty() ) {
      uBaseTask *t = &(waiting.uHead()->uGet());
      spin.release();
      uAbort( ": attempt to delete owner lock with task %.256s (0x%p) still blocked on it.", t->uGetName(), t );
    } // if
    spin.release();
} // uCondLock::~uCondLock
#endif // __U_DEBUG__

bool uCondLock::empty() const {
    return waiting.uEmpty();
} // uCondLock::empty

void uCondLock::wait( uOwnerLock &lock ) {
    uBaseTask &t = uThisTask();                       // optimization
#ifdef __U_DEBUG__
    uBaseTask *owner = lock.owner();                  // owner could change
    if ( owner != &t ) {
      uAbort( ": attempt by waiting task %.256s (0x%p) to release owner lock currently owned by task %.256s (0x%p).",
            t.uGetName(), &t, owner->uGetName(), owner );
    } // if
#endif // __U_DEBUG__
    t.uLockPtr = &lock;                         // task remembers this lock before blocking for use in signal
    unsigned int prevcnt = lock.count;                // remember this lock's recursive count before blocking
    spin.acquire();
    waiting.uAddTail( &(t.uEntryRef) );               // queue current task
    // Must add to the condition queue first before releasing the owner lock
    // because testing for empty condition can occur immediately after the
    // owner lock is released.
    lock._release();                            // release owner lock
    uSCHEDULE( &spin );                         // atomically release condition spin lock and block
    // spin released by uSchedule, owner lock is acquired when task restarts
    uAssert( &t == lock.owner() );
    lock.count = prevcnt;                       // reestablish lock's recursive count after blocking
} // uCondLock::wait

bool uCondLock::timedwait( uOwnerLock &lock, uDuration duration ) {
    return timedwait( lock, uActiveProcessorKernel->uKernelClock.uGetTime() + duration );
} // uCondLock::timedwait

bool uCondLock::timedwait( uOwnerLock &lock, uTime time ) {
    uBaseTask &t = uThisTask();                       // optimization
#ifdef __U_DEBUG__
    uBaseTask *owner = lock.owner();                  // owner could change
    if ( owner != &t ) {
      uAbort( ": attempt by waiting task %.256s (0x%p) to release owner lock currently owned by task %.256s (0x%p).",
            t.uGetName(), &t, owner->uGetName(), owner );
    } // if
#endif // __U_DEBUG__
    t.uLockPtr = &lock;                         // task remembers this lock before blocking for use in signal
    unsigned int prevcnt = lock.count;                // remember this lock's recursive count before blocking
    spin.acquire();

#ifdef __U_DEBUG_H__
    uDebugPrt( "(uCondLock &)0x%p.timedwait, task:0x%p\n", this, &t );
#endif // __U_DEBUG_H__

    uCondLockTimedwaitHndlr handler( t, *this );      // handler to wake up blocking task

    uEventNode uTimeoutEvent( t, handler, time, 0 );
    uTimeoutEvent.uExecuteLocked = true;

    uProcessor &uProc = uThisProcessor();
    uEventList *uEvents = uProc.uEvents;
    uEvents->uAddEvent( uTimeoutEvent, uProc );

    waiting.uAddTail( &(t.uEntryRef) );               // queue current task
    // Must add to the condition queue first before releasing the owner lock
    // because testing for empty condition can occur immediately after the
    // owner lock is released.
    lock._release();                            // release owner lock
    uSCHEDULE( &spin );                         // atomically release owner spin lock and block
    // spin released by uSchedule, owner lock is acquired when task restarts
    uAssert( &t == lock.owner() );
    lock.count = prevcnt;                       // reestablish lock's recursive count after blocking

    uEvents->uRemoveEvent( uTimeoutEvent, uProc );

    return ! handler.uTimedout;
} // uCondLock::timedwait
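
// timedwait returns true if the condition was signalled and false if the
// timeout expired first; in both cases the owner lock is reacquired before
// returning.  A minimal usage sketch with hypothetical names (guarded out,
// not compiled into the kernel):
#if 0
static uOwnerLock mlock;
static uCondLock ready;
static bool available = false;

static bool waitUpTo( uDuration limit ) {
    mlock.acquire();
    while ( ! available ) {
        if ( ! ready.timedwait( mlock, limit ) ) { // false => timed out
            mlock.release();
            return false;
        } // if
    } // while
    mlock.release();
    return true;
} // waitUpTo
#endif // usage sketch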

void uCondLock::uWaitTimeout( uBaseTask &t, uCondLockTimedwaitHndlr &h ) {
    // This uCondLock member is called from the kernel, and therefore, cannot
    // block, but it can spin.

    spin.acquire();
#ifdef __U_DEBUG_H__
    uDebugPrt( "(uCondLock &)0x%p.uWaitTimeout, task:0x%p\n", this, &t );
#endif // __U_DEBUG_H__
    if ( t.uEntryRef.uListed() ) {              // is task on queue
      waiting.uRemove( &(t.uEntryRef) );        // remove task at head of waiting list
      h.uTimedout = true;
      spin.release();
      t.uLockPtr->_add( t );                    // restart it or chain to its owner lock
    } else {
      spin.release();
    } // if
} // uCondLock::uWaitTimeout

void uCondLock::signal() {
    spin.acquire();
    if ( waiting.uEmpty() ) {                   // signal on empty condition is no-op
      spin.release();
      return;
    } // if
    uBaseTask &t = waiting.uDropHead()->uGet();       // remove task at head of waiting list
    spin.release();
    t.uLockPtr->_add( t );                      // restart it or chain to its owner lock
} // uCondLock::signal

void uCondLock::broadcast() {
    // It is impossible to chain the entire waiting list to the associated
    // owner lock because each wait can be on a different owner lock. Hence,
    // each task has to be individually processed to move it onto the correct
    // owner lock.

    uQueue<uBaseTaskDL> temp;
    spin.acquire();
    uQueue<uBaseTaskDL>::transfer( temp, waiting );
    spin.release();
    while ( ! temp.uEmpty() ) {
      uBaseTask &t = temp.uDropHead()->uGet();  // remove task at head of waiting list
      t.uLockPtr->_add( t );                    // restart it or chain to its owner lock
    } // while
} // uCondLock::broadcast
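
// wait atomically releases the owner lock and blocks the caller; signal moves
// one waiting task back onto its owner lock, and broadcast moves all of them.
// A minimal producer/consumer sketch with hypothetical names (guarded out,
// not compiled into the kernel):
#if 0
static uOwnerLock qLock;
static uCondLock nonEmpty;
static int items = 0;

static void produce() {
    qLock.acquire();
    items += 1;
    nonEmpty.signal();                          // wake one waiting consumer, if any
    qLock.release();
} // produce

static void consume() {
    qLock.acquire();
    while ( items == 0 ) {
        nonEmpty.wait( qLock );                 // release qLock, block, reacquire on wakeup
    } // while
    items -= 1;
    qLock.release();
} // consume
#endif // usage sketch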


//######################### Real-Time #########################


uWakeupHndlr::uWakeupHndlr( uBaseTask &t ) {
    uThis = &t;
} // uWakeupHndlr::uWakeupHndlr

void uWakeupHndlr::uHandler() {
    uThis->uWake();
} // uWakeupHndlr::uHandler

void uCxtSwtchHndlr::uHandler() {
    // Do not use uYield here because it polls for async events. Async events
    // cannot be delivered because there is a signal handler stack frame on the
    // current stack, and it is unclear what the semantics are for abnormally
    // terminating that frame.

    uThisTask().uYieldInvoluntary();
} // uCxtSwtchHndlr::uHandler

uTimeoutHndlr::uTimeoutHndlr( uBaseTask &t, uSerial &serial ) : serial( serial ){
    uThis = &t;
} // uTimeoutHndlr::uTimeoutHndlr

uTimeoutHndlr::uTimeoutHndlr( uSerial &serial ) : serial( serial ){
    uThis = NULL;
} // uTimeoutHndlr::uTimeoutHndlr

void uTimeoutHndlr::uHandler() {
    serial.uEnterTimeout();
} // uTimeoutHndlr::uHandler

uSelectTimeoutHndlr::uSelectTimeoutHndlr( uBaseTask &t, uNBIOnode &n ) : uNode( n ){
    uThis = &t;
} // uSelectTimeoutHndlr::uSelectTimeoutHndlr

void uSelectTimeoutHndlr::uHandler() {
    uNode.uTimedout = true;
} // uSelectTimeoutHndlr::uHandler


uCondLockTimedwaitHndlr::uCondLockTimedwaitHndlr( uBaseTask &t, uCondLock &condlock ) : condlock( condlock ) {
    uThis = &t;
    uTimedout = false;
} // uCondLockTimedwaitHndlr::uCondLockTimedwaitHndlr

uCondLockTimedwaitHndlr::uCondLockTimedwaitHndlr( uCondLock &condlock ) : condlock( condlock ) {
    uThis = NULL;
    uTimedout = false;
} // uCondLockTimedwaitHndlr::uCondLockTimedwaitHndlr

void uCondLockTimedwaitHndlr::uHandler() {
    condlock.uWaitTimeout( *uThis, *this );
} // uCondLockTimedwaitHndlr::uHandler


uBaseTask &uBaseScheduleFriend::uGetInheritTask( uBaseTask &t ) const {
    return t.uGetInheritTask();
} // uBaseScheduleFriend::uGetInheritTask

int uBaseScheduleFriend::uGetActivePriority( uBaseTask &t ) const {
    // special case for base of active priority stack
    return t.uGetActivePriority();
} // uBaseScheduleFriend::uGetActivePriority

int uBaseScheduleFriend::uGetActivePriorityValue( uBaseTask &t ) const {
    return t.uGetActivePriorityValue();
} // uBaseScheduleFriend::uGetActivePriorityValue

int uBaseScheduleFriend::uSetActivePriority( uBaseTask &t1, int p ) {
    return t1.uSetActivePriority( p );
} // uBaseScheduleFriend::uSetActivePriority

int uBaseScheduleFriend::uSetActivePriority( uBaseTask &t1, uBaseTask &t2 ) {
    return t1.uSetActivePriority( t2 );
} // uBaseScheduleFriend::uSetActivePriority

int uBaseScheduleFriend::uGetBasePriority( uBaseTask &t ) const {
    return t.uGetBasePriority();
} // uBaseScheduleFriend::uGetBasePriority

int uBaseScheduleFriend::uSetBasePriority( uBaseTask &t, int p ) {
    return t.uSetBasePriority( p );
} // uBaseScheduleFriend::uSetBasePriority

int uBaseScheduleFriend::uGetActiveQueueValue( uBaseTask &t ) const {
    return t.uGetActiveQueueValue();
} // uBaseScheduleFriend::uGetActiveQueueValue

int uBaseScheduleFriend::uSetActiveQueue( uBaseTask &t1, int q ) {
    return t1.uSetActiveQueue( q );
} // uBaseScheduleFriend::uSetActiveQueue

int uBaseScheduleFriend::uGetBaseQueue( uBaseTask &t ) const {
    return t.uGetBaseQueue();
} // uBaseScheduleFriend::uGetBaseQueue

int uBaseScheduleFriend::uSetBaseQueue( uBaseTask &t, int q ) {
    return t.uSetBaseQueue( q );
} // uBaseScheduleFriend::uSetBaseQueue

bool uBaseScheduleFriend::uIsEntryBlocked( uBaseTask &t ) const {
    return t.uEntryRef.uListed();
} // uBaseScheduleFriend::uIsEntryBlocked

bool uBaseScheduleFriend::uCheckHookConditions( uBaseTask &t1, uBaseTask &t2 ) const {
    return t2.uGetSerial()->uCheckHookConditions( &t1 );
} // uBaseScheduleFriend::uCheckHookConditions


//######################### uBasePrioritySeq #########################


uBasePrioritySeq::uBasePrioritySeq() {
    uExecuteHooks = false;
} // uBasePrioritySeq::uBasePrioritySeq

bool uBasePrioritySeq::uEmpty() const {
    return list.uEmpty();
} // uBasePrioritySeq::uEmpty

uBaseTaskDL *uBasePrioritySeq::uHead() const {
    return list.uHead();
} // uBasePrioritySeq::uHead

int uBasePrioritySeq::uAdd( uBaseTaskDL *node, uBaseTask *uOwner ) {
    list.uAddTail( node );
    return 0;
} // uBasePrioritySeq::uAdd

uBaseTaskDL *uBasePrioritySeq::uDrop() {
    return list.uDropHead();
} // uBasePrioritySeq::uDrop

void uBasePrioritySeq::uRemove( uBaseTaskDL *node ) {
    list.uRemove( node );
} // uBasePrioritySeq::uRemove

void uBasePrioritySeq::uOnAcquire( uBaseTask &uOwner ) {
} // uBasePrioritySeq::uOnAcquire

void uBasePrioritySeq::uOnRelease( uBaseTask &uOldOwner ) {
} // uBasePrioritySeq::uOnRelease

int uBasePrioritySeq::uReposition( uBaseTask *t, uSerial *s ) {
    uRemove( &(t->uEntryRef) );                       // remove from entry queue
    t->uCalledEntryMem->uRemove( &(t->uMutexRef) );   // remove from mutex queue
    
    // Call the cluster routine to adjust the ready queue and active priority;
    // because the owner is not on the entry queue, it can be updated based on
    // its uPIQ.

    uThisCluster().uTaskSetPriority( *t, *t );
    
    t->uCalledEntryMem->uAdd( &(t->uMutexRef), s->uMutexOwner ); // add to mutex queue
    return uAdd( &(t->uEntryRef), s->uMutexOwner );   // add to entry queue, automatically does transitivity
} // uBasePrioritySeq::uReposition


//######################### uBasePriorityQueue #########################


bool uBasePriorityQueue::uEmpty() const {
    return list.uEmpty();
} // uBasePriorityQueue::uEmpty

uBaseTaskDL *uBasePriorityQueue::uHead() const {
    return list.uHead();
} // uBasePriorityQueue::uHead

int uBasePriorityQueue::uAdd( uBaseTaskDL *node, uBaseTask *uOwner ) {
    list.uAdd( node );
    return 0;                                   // dummy value
} // uBasePriorityQueue::uAdd

uBaseTaskDL *uBasePriorityQueue::uDrop() {
    return list.uDrop();
} // uBasePriorityQueue::uDrop

void uBasePriorityQueue::uRemove( uBaseTaskDL *node ) {
    // Only used with default FIFO case, so node to remove is at the front of
    // the list.
    list.uDrop();
} // uBasePriorityQueue::uRemove

void uBasePriorityQueue::uOnAcquire( uBaseTask &uOwner ) {
} // uBasePriorityQueue::uOnAcquire

void uBasePriorityQueue::uOnRelease( uBaseTask &uOldOwner ) {
} // uBasePriorityQueue::uOnRelease



//######################### uRepositionEntry #########################


uRepositionEntry::uRepositionEntry( uBaseTask &b, uBaseTask &c ) : blocked(b), calling(c) {
    bSerial = blocked.uGetSerial();
    cSerial = calling.uGetSerial();
} // uRepositionEntry::uRepositionEntry

int uRepositionEntry::uReposition( bool RelCallingLock ) {
    int uRelPrevLock = 0;
    
    bSerial->lock.acquire();
    
    // If owner's current mutex object changes, then owner fixes its own active
    // priority. Recheck if inheritance is necessary as only owner can lower
    // its priority => updated.

    if ( bSerial != blocked.uGetSerial() || ! blocked.uEntryRef.uListed() ||
       blocked.uPIQ->uGetHighestPriority() >= blocked.uGetActivePriorityValue() ) {
      // As owner restarted, the end of the blocking chain has been reached.
      bSerial->lock.release();
      return uRelPrevLock;
    } // if

    if ( RelCallingLock == true ) {
      // release the old lock as correct current lock is acquired
      cSerial->lock.release();
      uRelPrevLock = 1;
    } // if

    if ( bSerial->uEntryList.uReposition( &blocked, bSerial ) == 0 ) {
      // only last call does not release lock, so reacquire first entry lock
      if ( RelCallingLock == true ) uThisTask().uGetSerial()->lock.acquire();
      bSerial->lock.release();
    } // if

    // The return value is based on the release of cSerial->lock not
    // bSerial->lock.  The return value from bSerial is processed in the if
    // statement above, so it does not need to be propagated.

    return uRelPrevLock;
} // uRepositionEntry::uReposition


//######################### uDefaultScheduler #########################


bool uDefaultScheduler::uEmpty() const {
    return list.uEmpty();
} // uDefaultScheduler::uEmpty

void uDefaultScheduler::uAdd( uBaseTaskDL *node ) {
    list.uAddTail( node );
} // uDefaultScheduler::uAdd

uBaseTaskDL *uDefaultScheduler::uDrop() {
    return list.uDropHead();
} // uDefaultScheduler::uDrop

bool uDefaultScheduler::uCheckPriority( uBaseTaskDL &, uBaseTaskDL & ) { return false; }

void uDefaultScheduler::uResetPriority( uBaseTaskDL &, uBaseTaskDL & ) {}

void uDefaultScheduler::uAddInitialize( uBaseTaskSeq & ) {}

void uDefaultScheduler::uRemoveInitialize( uBaseTaskSeq & ) {}

void uDefaultScheduler::uRescheduleTask( uBaseTaskDL *, uBaseTaskSeq & ) {}


//######################### uKernelModule #########################


void uKernelModule::startup() {
    uKernelModule::uKernelModuleInitialized = true;
    volatile uKernelModule *km;

#if defined( __irix__ ) && defined( __U_MULTI__ )
    // the user part of the PRDA holds the kernel module
    km = (uKernelModule *)&(PRDA->usr_prda);
#elif defined( __ia64__ ) && defined( __linux__ ) && defined( __U_MULTI__ )
#if defined( __U_TLS__ )
    asm volatile ("addl %0 = @ltoff(@tprel(_ZN13uKernelModule17uKernelModuleBootE#)), gp;;\n"
                  "ld8 %0 = [%0];;\n"
                  "add %0 = %0, r13;;\n" : "=r" (km) );
#else
    km = &uKernelModule::uKernelModuleBoot;
#endif // __U_TLS__
#elif defined( __i386__ ) && defined( __linux__ ) && defined( __U_MULTI__ )
#if defined( __U_TLS__ )
    unsigned long tp;
    __asm__ __volatile__ ("movl %%gs:0,%1\n"
                          "leal _ZN13uKernelModule17uKernelModuleBootE@ntpoff(%1),%0"
                    : "=r" (km), "=r" (tp));
    km->threadPointer = tp;
#else
    km = &uKernelModule::uKernelModuleBoot;
    km->threadPointer = (unsigned long)km;
#endif // __U_TLS__
    km->ldtValue = uProcessor::uAllocLDT();
#else
    // use statically allocated kernel module
    km = &uKernelModule::uKernelModuleBoot;
#endif

    km->ctor();
} // uKernelModule::startup


void uKernelModule::ctor() volatile {
    uSelf = this;
    uKernelModuleInitialized = true;

    uActiveProcessor = (uProcessor *)&uKernelModule::uSystemProcessorStorage;
    uActiveCluster = (uCluster *)&uKernelModule::uSystemClusterStorage;
    uActiveTask = (uBaseTask *)&uTaskBootStorage;

    uDisableInt = true;
    uDisableIntCnt = 1;

    uDisableIntSpin = false;
    uDisableIntSpinCnt = 0;

    uInKernelRF = 0;

#if defined( __U_MULTI__ )
    // set private memory pointer
#if defined( __linux__ )
#if defined( __i386__ )
    int ldtIndex = ( ldtValue - 7 ) / 8;
    struct modify_ldt_ldt_s ldtEntry =
    {
      ldtIndex,
      (unsigned long int) threadPointer,
      0xfffff /* 4GB in pages */,
      1, 0, 0, 1, 0, 1 //, 0
    };
    if ( modify_ldt( 1, &ldtEntry, sizeof(ldtEntry) ) != 0 ) {
      uAbort( "(uKernelModule &)0x%p.ctor() : internal error, modify_ldt.", this );
    } // if
    asm volatile ( "movw %w0, %%gs" : : "q" (ldtValue) );

#elif defined( __ia64__ )
#if defined( __U_TLS__ )
    register volatile uKernelModule *thread_self asm( "r13" );
    threadPointer = (unsigned long)thread_self;
#else
    threadPointer = (unsigned long)&uIA64OffsetStoreFlag;
    uIA64OffsetStoreFlag = 0;
    /* Register r13 (tp) is reserved by the ABI as "thread pointer". */
    asm volatile ("mov r13=%0" : : "r" (&uIA64OffsetStoreFlag));
#endif

#else
    #error uC++ internal error : unsupported architecture
#endif

#elif defined( __solaris__ )
    _lwp_setprivate( (void *)this );

#elif defined( __irix__ )

#else
    #error uC++ internal error : unsupported architecture
#endif
#endif // __U_MULTI__
} // uKernelModule::ctor


void uKernelModule::dtor() volatile {
#if defined( __U_MULTI__ ) && defined( __linux__ ) && defined( __U_TLS__ )
    _dl_deallocate_tls( (void *)threadPointer, false );
#endif // __linux__ && __U_MULTI__ && __U_TLS__
} // uKernelModule::dtor


void uKernelModule::uRollForward( bool inKernel ) {
#ifdef __U_DEBUG_H__
    char buffer[256];
    int debugCnt = 0;
    uDebugPrtBuf( buffer, "uRollForward( %d ), uDisableInt:%d, uDisableIntCnt:%d, uDisableIntSpin:%d, uDisableIntSpinCnt:%d, uInKernelRF:%d\n",
              inKernel, THREAD_GETMEM(uDisableInt), THREAD_GETMEM(uDisableIntCnt), THREAD_GETMEM(uDisableIntSpin), THREAD_GETMEM(uDisableIntSpinCnt), THREAD_GETMEM(uInKernelRF) );
#endif // __U_DEBUG_H__

    uEventList *events = uThisProcessor().uEvents;    // copy the current processor as it could change during execution
    uEventNode *event;

    for ( uEventListPop gen( *events, inKernel ); gen >> event; ) {
#ifdef __U_DEBUG_H__
      debugCnt += 1;
      uDebugPrtBuf( buffer, "uRollForward, pop #%d\n", debugCnt );
#endif // __U_DEBUG_H__
    } // for

#ifdef __U_DEBUG_H__
    uDebugPrtBuf( buffer, "uRollForward, leaving, uInKernelRF:%d\n", THREAD_GETMEM( uInKernelRF) );
#endif // __U_DEBUG_H__
} // uRollForward


//######################### Translator Generated Definitions #########################


uCoroutineConstructor::uCoroutineConstructor( uAction f, uSerial &s, uBaseCoroutine &c, const char *n ) {
    if ( f == uYes ) {
      c.uStartHere( (void (*)( uMachContext & ))uMachContext::uInvokeCoroutine );
      c.uName = n;
      c.serial = &s;                            // set cormonitor's serial instance

      if ( uThisTask().uProfileActive && uProfiler::uProfiler_RegisterCoroutine && // profiling & coroutine registered for profiling ?
           dynamic_cast<uProcessorKernel *>(&c) == NULL ) { // and not kernel coroutine
          (*uProfiler::uProfiler_RegisterCoroutine)( uProfiler::uProfilerInstance, c, s );
      } // if
    } // if
} // uCoroutineConstructor::uCoroutineConstructor


uCoroutineDestructor::uCoroutineDestructor( uAction f, uBaseCoroutine &c ) : f( f ), c( c ) {
} // uCoroutineDestructor::uCoroutineDestructor

uCoroutineDestructor::~uCoroutineDestructor() {
    if ( f == uYes ) {
      if ( uThisTask().uProfileActive && uProfiler::uProfiler_DeregisterCoroutine ) { // profiling this coroutine & coroutine registered for profiling ? 
          (*uProfiler::uProfiler_DeregisterCoroutine)( uProfiler::uProfilerInstance, c );
      } // if
    } // if
} // uCoroutineDestructor::~uCoroutineDestructor


uCoroutineMain::uCoroutineMain( uBaseCoroutine &c ) : c( c ) {
    // also appears in uBaseCoroutine::uContextSw2
    if ( uThisTask().uProfileActive && uProfiler::uProfiler_RegisterCoroutineUnblock ) {
      if ( THREAD_GETMEM( uDisableInt ) ) return;     // ignore profiling for kernel coroutine
      (*uProfiler::uProfiler_RegisterCoroutineUnblock)( uProfiler::uProfilerInstance, uThisTask() );
    } // if
} // uCoroutineMain::uCoroutineMain

uCoroutineMain::~uCoroutineMain( ) {                  // necessary for KDB
} // uCoroutineMain::~uCoroutineMain


uSerial::uSerial( uBasePrioritySeq &uEntryList ) : uEntryList( uEntryList ) {
    uMask.clrAll();                             // mutex members start closed
    uMutexOwner = &uThisTask();                       // set the current mutex owner *for the creating task*
    acceptMask = false;
    uMutexMaskPosn = NULL;

    uDestructorTask = NULL;  
    uDestructorStatus = NoDestructor;
    uConstructorTask = uMutexOwner;

    // real-time

    uTimeoutEvent.uExecuteLocked = true;
    uProc = NULL;
    uEvents = NULL;

    // exception handling

    uLastAcceptor = NULL;
    uAlive = true;
    uTimedoutRF = false;

    // profiling

    uProfileSerialSamplerInstance = NULL;
} // uSerial::uSerial

uSerial::~uSerial() {
    uAlive = false;                             // no more entry calls can be accepted
    uBaseTask &t = uThisTask();                       // optimization

    for ( ;; ) {
      uBaseTaskDL *p = uAcceptSignalled.uDrop();
      if ( p == NULL ) break;
      uMutexOwner = &(p->uGet());
      uThrow uEntryFailure( this, "blocked on acceptor/signalled stack" ) uAt *(uMutexOwner->uCurrCoroutine);
      uAcceptSignalled.uAdd( &(t.uMutexRef) );  // suspend current task on top of accept/signalled stack
      uSCHEDULE( uMutexOwner );
    } // for

    if ( ! uEntryList.uEmpty() ) {              // no need to acquire the lock if the queue is empty
      for ( ;; ) {
          lock.acquire();
          uBaseTaskDL *p = uEntryList.uDrop();
        if ( p == NULL ) break;
          uMutexOwner = &(p->uGet());
          uMutexOwner->uCalledEntryMem->uRemove( &(uMutexOwner->uMutexRef) );
          uThrow uEntryFailure( this, "blocked on entry queue" ) uAt *(uMutexOwner->uCurrCoroutine);
          uAcceptSignalled.uAdd( &(t.uMutexRef) ); // suspend current task on top of accept/signalled stack
          uSCHEDULE( &lock, uMutexOwner );
      } // for
      lock.release();
    } // if
} // uSerial::~uSerial

bool uSerial::uCheckHookConditions( uBaseTask *t ) {
    return t != uConstructorTask && t != uDestructorTask;
} // uSerial::uCheckHookConditions

void uSerial::uResetDestructorStatus() {
    uDestructorStatus = NoDestructor;
    uDestructorTask = NULL;
} // uSerial::uResetDestructorStatus

void uSerial::uEnter( unsigned int &mr, uBasePrioritySeq &ml, int mp ) {
    uBaseTask &t = uThisTask();                       // optimization
    t.uSetSerial( *this );
    lock.acquire();

#ifdef __U_DEBUG_H__
    uDebugPrt( "(uSerial &)0x%p.uEnter, mask:0x%x,0x%x,0x%x,0x%x, owner:0x%p, ml:0x%p, mp:%d\n",
             this, uMask[0], uMask[1], uMask[2], uMask[3], uMutexOwner, &ml, mp );
#endif // __U_DEBUG_H__
    if ( uMask.isSet( mp ) ) {                        // member acceptable ?
      uMask.clrAll();                           // clear the mask
      mr = t.uMutexRecursion;                   // save previous recursive count
      t.uMutexRecursion = 0;                    // reset recursive count
      uMutexOwner = &t;                   // set the current mutex owner
      if ( uEntryList.uExecuteHooks ) {
          // always execute hook as calling task cannot be constructor or destructor
          uEntryList.uOnAcquire( *uMutexOwner );      // perform any priority inheritance
      } // if
      lock.release();
    } else if ( uMutexOwner == &t ) {                 // already hold mutex ?
      t.uMutexRecursion += 1;                   // another recursive call at the mutex object level
      lock.release();
    } else {                                    // otherwise block the calling task
      ml.uAdd( &(t.uMutexRef), uMutexOwner );         // add to end of mutex queue
      t.uCalledEntryMem = &ml;                  // remember which mutex member called
      uEntryList.uAdd( &(t.uEntryRef), uMutexOwner ); // add mutex object to end of entry queue
      uSCHEDULE( &lock );                       // find someone else to execute; release lock on kernel stack
      mr = t.uMutexRecursion;                   // save previous recursive count
      t.uMutexRecursion = 0;                    // reset recursive count
      uEnable <uSerial::uFailure> {
          uEHM::uPoll();
      } // uEnable
    } // if
    if ( uMutexMaskPosn != NULL ) *uMutexMaskPosn = mp;     // set active mutex member
} // uSerial::uEnter

// enter routine for destructor, does not poll
void uSerial::uEnterDestructor( unsigned int &mr, uBasePrioritySeq &ml, int mp ) {
    uBaseTask &t = uThisTask();                       // optimization

    if ( uDestructorStatus != NoDestructor ) {        // only one task is allowed to call destructor
      uAbort( ": attempt by task %.256s (0x%p) to call the destructor for uSerial 0x%p, but this destructor was already called by task %.256s (0x%p).\n"
            "Possible cause is multiple tasks simultaneously deleting a mutex object.",
            t.uGetName(), &t, this, uDestructorTask->uGetName(), uDestructorTask );
    } // if

    t.uSetSerial( *this );
    lock.acquire();

    uDestructorStatus = DestrCalled;
    uDestructorTask = &t;

#ifdef __U_DEBUG_H__
    uDebugPrt( "(uSerial &)0x%p.uEnterNoPoll, mask:0x%x,0x%x,0x%x,0x%x, owner:0x%p, ml:0x%p, mp:%d\n",
             this, uMask[0], uMask[1], uMask[2], uMask[3], uMutexOwner, &ml, mp );
#endif // __U_DEBUG_H__
    if ( uMask.isSet( mp ) ) {                        // member acceptable ?
      uMask.clrAll();                           // clear the mask
      mr = t.uMutexRecursion;                   // save previous recursive count
      t.uMutexRecursion = 0;                    // reset recursive count
      uMutexOwner = &t;                   // set the current mutex owner
      uDestructorStatus = DestrScheduled;
      // hook is not executed for destructor
      lock.release();
    } else if ( uMutexOwner == &t ) {                 // already hold mutex ?
      uAbort( ": attempt by task %.256s (0x%p) to call the destructor for uSerial 0x%p, but this task has outstanding nested calls to this mutex object.\n"
            "Possible cause is deleting a mutex object with outstanding nested calls to one of its members.",
            t.uGetName(), &t, this );
    } else {                                    // otherwise block the calling task
      t.uCalledEntryMem = &ml;                  // remember which mutex member was called
      uSCHEDULE( &lock );                       // find someone else to execute; release lock on kernel stack
      mr = t.uMutexRecursion;                   // save previous recursive count
      t.uMutexRecursion = 0;                    // reset recursive count
    } // if
    if ( uMutexMaskPosn != NULL ) *uMutexMaskPosn = mp;     // set active mutex member
} // uSerial::uEnterDestructor

void uSerial::uEnterTimeout() {
    // This monitor member is called from the kernel, and therefore, cannot
    // block, but it can spin.

    lock.acquire();

#ifdef __U_DEBUG_H__
    uDebugPrt( "(uSerial &)0x%p.uEnterTimeout, mask:0x%x,0x%x,0x%x,0x%x, owner:0x%p, uMutexMaskPosn:0x%p\n",
             this, uMask[0], uMask[1], uMask[2], uMask[3], uMutexOwner, uMutexMaskPosn );
#endif // __U_DEBUG_H__
    if ( uMask.isSet( __U_TIMEOUTPOSN__ ) ) {         // timeout member acceptable ?  0 => timeout mask bit
      uMask.clrAll();                           // clear the mask
      *uMutexMaskPosn = 0;                      // set timeout mutex member  0 => timeout mask bit
      uMutexOwner = &(uAcceptSignalled.uDrop()->uGet()); // next task to gain control of the mutex object
      
      // priority-inheritance, bump up priority of mutexowner from head
      // of prioritized entry queue (NOT leaving task), because suspended
      // stack is not prioritized.
      
      if ( uEntryList.uExecuteHooks && uCheckHookConditions( uMutexOwner ) ) {
          uEntryList.uOnAcquire( *uMutexOwner );
      } // if

#ifdef __U_DEBUG_H__
      uDebugPrt( "(uSerial &)0x%p.uEnterTimeout, waking task %.256s (0x%p) \n", this, uMutexOwner->uGetName(), uMutexOwner );
#endif // __U_DEBUG_H__
      uMutexOwner->uWake();                     // wake up next task to use this mutex object
    } // if

    lock.release();
} // uSerial::uEnterTimeout

// uLeave and uLeave2 do not poll for concurrent exceptions because they are
// called in some critical destructors.  Throwing an exception out of these
// destructors causes problems.

void uSerial::uLeave( unsigned int mr ) {       // used when a task is leaving a mutex and has not queued itself before calling
    uBaseTask &t = uThisTask();                       // optimization

    if ( t.uMutexRecursion != 0 ) {             // already hold mutex ?
      if ( acceptMask ) {
          // lock is acquired and mask set by accept statement
          acceptMask = false;
          lock.release();
      } // if
      t.uMutexRecursion -= 1;
    } else {
      if ( acceptMask ) {
          // lock is acquired and mask set by accept statement
          acceptMask = false;
          uMutexOwner = NULL;                   // reset no task in mutex object
          if ( uEntryList.uExecuteHooks && uCheckHookConditions( &t)  ) {
            uEntryList.uOnRelease( t );
          } // if
          if ( &t == uDestructorTask ) uResetDestructorStatus();
          lock.release();
      } else if ( uAcceptSignalled.uEmpty() ) { // no tasks waiting re-entry to mutex object ?
          lock.acquire();
          if ( uDestructorStatus != DestrCalled ) {
            if ( uEntryList.uEmpty() ) {        // no tasks waiting entry to mutex object ?
                uMask.setAll();                 // accept all members
                uMask.clr( 0 );                 // except timeout
                uMutexOwner = NULL;             // reset no task in mutex object
                if ( uEntryList.uExecuteHooks && uCheckHookConditions( &t)  ) {
                  uEntryList.uOnRelease( t );
                } // if
                if ( &t == uDestructorTask ) uResetDestructorStatus();
                lock.release();
            } else {                      // tasks waiting entry to mutex object
                uMutexOwner = &(uEntryList.uDrop()->uGet()); // next task to gain control of the mutex object
                uMutexOwner->uCalledEntryMem->uRemove( &(uMutexOwner->uMutexRef) ); // also remove task from mutex queue
                if ( uEntryList.uExecuteHooks ) {
                  if ( uCheckHookConditions( &t) ) uEntryList.uOnRelease( t );
                  if ( uCheckHookConditions( uMutexOwner ) ) uEntryList.uOnAcquire( *uMutexOwner );
                } // if
                if ( &t == uDestructorTask ) uResetDestructorStatus();
                lock.release();
#ifdef __U_DEBUG_H__
                uDebugPrt( "(uSerial &)0x%p.uLeave, waking task %.256s (0x%p)\n", this, uMutexOwner->uGetName(), uMutexOwner );
#endif // __U_DEBUG_H__
                uMutexOwner->uWake();           // wake up next task to use this mutex object
            } // if
          } else {
            uMutexOwner = uDestructorTask;
            uDestructorStatus = DestrScheduled;
            if ( uEntryList.uExecuteHooks ) {
                if ( uCheckHookConditions( &t) ) uEntryList.uOnRelease( t );
                // do not call the acquire hook for the destructor
            } // if
            lock.release();
#ifdef __U_DEBUG_H__
                uDebugPrt( "(uSerial &)0x%p.uLeave, waking task %.256s (0x%p)\n", this, uMutexOwner->uGetName(), uMutexOwner );
#endif // __U_DEBUG_H__
            uMutexOwner->uWake();               // wake up next task to use this mutex object
          } // if
      } else {
          // priority-inheritance, bump up priority of mutexowner from head
          // of prioritized entry queue (NOT leaving task), because suspended
          // stack is not prioritized.

          if ( uEntryList.uExecuteHooks ) {
            lock.acquire();                     // acquire entry lock to prevent inversion during transfer 
            uMutexOwner = &(uAcceptSignalled.uDrop()->uGet()); // next task to gain control of the mutex object
            if ( uCheckHookConditions( &t) ) uEntryList.uOnRelease( t );
            if ( uCheckHookConditions( uMutexOwner ) ) uEntryList.uOnAcquire( *uMutexOwner );
#ifdef __U_DEBUG_H__
            uDebugPrt( "(uSerial &)0x%p.uLeave, waking task %.256s (0x%p)\n", this, uMutexOwner->uGetName(), uMutexOwner );
#endif // __U_DEBUG_H__
            uMutexOwner->uWake();               // wake up next task to use this mutex object
            if ( &t == uDestructorTask ) uResetDestructorStatus();
            lock.release();
          } else {
#ifdef __U_DEBUG_H__
            uDebugPrt( "(uSerial &)0x%p.uLeave, waking task %.256s (0x%p)\n", this, uMutexOwner->uGetName(), uMutexOwner );
#endif // __U_DEBUG_H__
            uMutexOwner = &(uAcceptSignalled.uDrop()->uGet()); // next task to gain control of the mutex object
            if ( &t == uDestructorTask ) {
                lock.acquire();
                uResetDestructorStatus();
                lock.release();
            } // if
            uMutexOwner->uWake();               // wake up next task to use this mutex object
          } // if
      } // if
      t.uMutexRecursion = mr;                   // restore previous recursive count
    } // if

#ifdef __U_DEBUG_H__
    uDebugPrt( "(uSerial &)0x%p.uLeave, mask:0x%x,0x%x,0x%x,0x%x, owner:0x%p\n",
             this, uMask[0], uMask[1], uMask[2], uMask[3], uMutexOwner );
#endif // __U_DEBUG_H__
} // uSerial::uLeave

void uSerial::uLeave2() {                       // used when a task is leaving a mutex and has queued itself before calling
    uBaseTask &t = uThisTask();                       // optimization

    if ( acceptMask ) {
      // lock is acquired and mask set by accept statement
      acceptMask = false;
      uMutexOwner = NULL;                       // reset no task in mutex object
      if ( uEntryList.uExecuteHooks && uCheckHookConditions( &t )  ) {
          uEntryList.uOnRelease( t );
      } // if
      if ( &t == uDestructorTask ) uResetDestructorStatus();
      uSCHEDULE( &lock );                       // find someone else to execute; release lock on kernel stack
    } else if ( uAcceptSignalled.uEmpty() ) {         // no tasks waiting re-entry to mutex object ?
      lock.acquire();
        if ( uDestructorStatus != DestrCalled ) {
          if ( uEntryList.uEmpty() ) {          // no tasks waiting entry to mutex object ?
            uMask.setAll();                     // accept all members
            uMask.clr( 0 );                     // except timeout
            uMutexOwner = NULL;
            if ( uEntryList.uExecuteHooks && uCheckHookConditions( &t ) ) {
                uEntryList.uOnRelease( t );
            } // if
            uSCHEDULE( &lock );                 // find someone else to execute; release lock on kernel stack
          } else {
            uMutexOwner = &(uEntryList.uDrop()->uGet()); // next task to gain control of the mutex object
            uMutexOwner->uCalledEntryMem->uRemove( &(uMutexOwner->uMutexRef) ); // also remove task from mutex queue
            if ( uEntryList.uExecuteHooks ) {
                if ( uCheckHookConditions( &t ) ) uEntryList.uOnRelease( t );
                if ( uCheckHookConditions( uMutexOwner ) ) uEntryList.uOnAcquire( *uMutexOwner );
            } // if
#ifdef __U_DEBUG_H__
            uDebugPrt( "(uSerial &)0x%p.uLeave2, waking task %.256s (0x%p)\n", this, uMutexOwner->uGetName(), uMutexOwner );
#endif // __U_DEBUG_H__
            uSCHEDULE( &lock, uMutexOwner );    // find someone else to execute; release lock and wake on kernel stack
          } // if
      } else {
         uMutexOwner = uDestructorTask;
         uDestructorStatus = DestrScheduled;
         if ( uEntryList.uExecuteHooks ) {
             if ( uCheckHookConditions( &t ) ) uEntryList.uOnRelease( t );
             // do not call the acquire hook for the destructor
         } // if
#ifdef __U_DEBUG_H__
          uDebugPrt( "(uSerial &)0x%p.uLeave2, waking task %.256s (0x%p)\n", this, uMutexOwner->uGetName(), uMutexOwner );
#endif // __U_DEBUG_H__
          uSCHEDULE( &lock, uMutexOwner );            // find someone else to execute; release lock and wake on kernel stack
      } // if
    } else {
      // Priority inheritance: bump up the priority of the mutex owner from the
      // head of the prioritized entry queue (NOT the leaving task), because
      // the suspended stack is not prioritized.

#ifdef __U_DEBUG_H__
      uDebugPrt( "(uSerial &)0x%p.uLeave2, waking task %.256s (0x%p)\n", this, uMutexOwner->uGetName(), uMutexOwner );
#endif // __U_DEBUG_H__
      if ( uEntryList.uExecuteHooks ) {
          lock.acquire();
          uMutexOwner = &(uAcceptSignalled.uDrop()->uGet()); // next task to gain control of the mutex object
          if ( uCheckHookConditions( &t ) ) uEntryList.uOnRelease( t );
          if ( uCheckHookConditions( uMutexOwner ) ) uEntryList.uOnAcquire( *uMutexOwner );
          uSCHEDULE( &lock, uMutexOwner );            // find someone else to execute; release lock and wake on kernel stack
      } else {
          uMutexOwner = &(uAcceptSignalled.uDrop()->uGet()); // next task to gain control of the mutex object
          uSCHEDULE( uMutexOwner );             // find someone else to execute; wake on kernel stack
      } // if
    } // if
#ifdef __U_DEBUG_H__
    uDebugPrt( "(uSerial &)0x%p.uLeave2, mask:0x%x,0x%x,0x%x,0x%x, owner:0x%p\n",
             this, uMask[0], uMask[1], uMask[2], uMask[3], uMutexOwner );
#endif // __U_DEBUG_H__
} // uSerial::uLeave2


// The field uSerial::uLastAcceptor is set in uAcceptTry and uAcceptPause and
// reset to NULL in uSerialMember::uSerialMember, so an exception can be thrown
// when all the guards in the accept statement fail.  This ensures that
// uLastAcceptor is set only when a task rendezvouses with another task.
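
// Illustrative sketch (not verbatim translator output): a user-level accept
// statement such as
//
//   _When ( cond ) _Accept( mem1 );
//   or _Accept( mem2 );
//
// is expanded by the uC++ translator into roughly the following calls on this
// uSerial object -- the exact generated names and argument lists may differ in
// this version:
//
//   uProtectAcceptStmt protect( serial );       // uAcceptStart: acquire lock, clear mask
//   if ( cond ) serial.uAcceptTry( mem1Queue, mem1Posn ); // try to accept mem1
//   serial.uAcceptTry( mem2Queue, mem2Posn );   // try to accept mem2
//   if ( serial.uAcceptTestMask() ) serial.uAcceptPause(); // no caller present => block acceptor
//                                               // ~uProtectAcceptStmt: uAcceptEnd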

void uSerial::uAcceptStart( unsigned int &uMutexMaskPosn ) {
    uBaseTask &t = uThisTask();                       // optimization
#ifdef __U_DEBUG__
    if ( &t != uMutexOwner ) {                        // must have mutex lock to wait
      uAbort( ": attempt to accept in a mutex object not locked by this task.\n"
            "Possible cause is accepting in a non-mutex member routine." );
    } // if
#endif // __U_DEBUG__

    if ( t.uProfileActive && uProfiler::uProfiler_RegisterAcceptStart ) { // task registered for profiling ?
      (*uProfiler::uProfiler_RegisterAcceptStart)( uProfiler::uProfilerInstance, *this, t );
    } // if

    lock.acquire();
    uMask.clrAll();
    uSerial::uMutexMaskPosn = &uMutexMaskPosn;
} // uSerial::uAcceptStart

bool uSerial::uAcceptTry( uBasePrioritySeq &ml, int mp ) {
    // lock is already acquired at beginning of accept statement
    if ( mp == __U_DESTRUCTORPOSN__ ) {               // ? destructor accepted
      // Handles the case where destructor has not been called or the
      // destructor has been scheduled.  If the destructor has been
      // scheduled, there is potential for synchronization deadlock when only
      // the destructor is accepted.
      if ( uDestructorStatus != DestrCalled ) {
          uMask.set( mp );                      // add this mutex member to the mask
          return false;                   // the accept failed
      } else {
          uBaseTask &t = uThisTask();                 // optimization
          uMutexOwner = uDestructorTask;        // next task to use this mutex object
          uDestructorStatus = DestrScheduled;         // change status of destructor to scheduled
          uLastAcceptor = &t;                   // saving the acceptor thread of a rendezvous
          uMask.clrAll();                       // clear the mask
          uAcceptSignalled.uAdd( &(t.uMutexRef) );    // suspend current task on top of accept/signalled stack
          if ( uEntryList.uExecuteHooks ) {
            // no check for destructor because it cannot accept itself
            if ( uCheckHookConditions( &t ) ) uEntryList.uOnRelease( t );  
            // do not call the acquire hook for the destructor
          } // if
          uSCHEDULE( &lock, uMutexOwner );            // find someone else to execute; release lock and wake on kernel stack
          if ( t.uAcceptedCall ) {              // accepted entry is suspended if true
            t.uAcceptedCall->uAcceptorSuspended = false; // acceptor resumes
            t.uAcceptedCall = NULL;
          } // if
          uEnable <uSerial::uRendezvousFailure> {
            uEHM::uPoll();
          } // uEnable
          return true;
      } // if
    } else {
      if ( ml.uEmpty() ) {
          uMask.set( mp );                      // add this mutex member to the mask
          return false;                   // the accept failed
      } else {
          uBaseTask &t = uThisTask();                 // optimization
          uMutexOwner = &(ml.uDrop()->uGet());  // next task to use this mutex object
          uLastAcceptor = &t;                   // saving the acceptor thread of a rendezvous
          uEntryList.uRemove( &(uMutexOwner->uEntryRef) ); // also remove task from entry queue
          uMask.clrAll();                       // clear the mask
          uAcceptSignalled.uAdd( &(t.uMutexRef) );    // suspend current task on top of accept/signalled stack
          if ( uEntryList.uExecuteHooks ) {
            if ( uCheckHookConditions( &t ) ) uEntryList.uOnRelease( t );  
            if ( uCheckHookConditions( uMutexOwner ) ) uEntryList.uOnAcquire( *uMutexOwner );
          } // if
          uSCHEDULE( &lock, uMutexOwner );            // find someone else to execute; release lock and wake on kernel stack
          if ( t.uAcceptedCall ) {              // accepted entry is suspended if true
            t.uAcceptedCall->uAcceptorSuspended = false; // acceptor resumes
            t.uAcceptedCall = NULL;
          } // if
          uEnable <uSerial::uRendezvousFailure> {
            uEHM::uPoll();
          } // uEnable
          return true;
      } // if
    } // if
} // uSerial::uAcceptTry

void uSerial::uAcceptTry() {
    // lock is already acquired at beginning of accept statement
    uMask.set( 0 );                             // add this mutex member to the mask, 0 => timeout mask bit
} // uSerial::uAcceptTry

bool uSerial::uAcceptTry2( uBasePrioritySeq &ml, int mp ) {
    // lock is already acquired at beginning of accept statement
    if ( mp == __U_DESTRUCTORPOSN__ ) {               // ? destructor accepted
      // Handles the case where destructor has not been called or the
      // destructor has been scheduled.  If the destructor has been
      // scheduled, there is potential for synchronization deadlock when only
      // the destructor is accepted.
      if ( uDestructorStatus != DestrCalled ) {
          uMask.set( mp );                      // add this mutex member to the mask
          return false;                   // the accept failed
      } else {
          uBaseTask *acceptedTask = uDestructorTask;  // next task to use this mutex object
          uDestructorStatus = DestrScheduled;         // change status of destructor to scheduled
          uMask.clrAll();                       // clear the mask
          lock.release();
          uAcceptSignalled.uAdd( &(acceptedTask->uMutexRef) ); // move accepted task on top of accept/signalled stack
          return true;
      } // if
    } else {
      if ( ml.uEmpty() ) {
          uMask.set( mp );                      // add this mutex member to the mask
          return false;                   // the accept failed
      } else {
          uBaseTask *acceptedTask = &(ml.uDrop()->uGet()); // next task to use this mutex object
          uEntryList.uRemove( &(acceptedTask->uEntryRef) ); // also remove task from entry queue
          uMask.clrAll();                       // clear the mask
          lock.release();
          uAcceptSignalled.uAdd( &(acceptedTask->uMutexRef) ); // move accepted task on top of accept/signalled stack
          return true;
      } // if
    } // if
} // uSerial::uAcceptTry2
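
// Unlike uAcceptTry above, uAcceptTry2 does not suspend the acceptor or record
// uLastAcceptor: the accepted task is simply moved onto the accept/signalled
// stack and the acceptor continues, so the accepted call runs only after the
// acceptor leaves the mutex object.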

bool uSerial::uAcceptTestMask() {
    return uMask.isAllClr();
} // uSerial::uAcceptTestMask

void uSerial::uAcceptElse() {
    uMask.clrAll();
    lock.release();
} // uSerial::uAcceptElse

void uSerial::uAcceptMask() {
    // The lock acquired at the start of the accept statement cannot be
    // released here, otherwise, it is necessary to recheck the mutex queues
    // before exit. As a consequence, all destructors between here and
    // ~uSerialMember (which executes uLeave) are executed with the mutex lock
    // closed, preventing tasks from queuing on this mutex object.
    acceptMask = true;
} // uSerial::uAcceptMask

void uSerial::uAcceptPause() {
#ifdef __U_DEBUG_H__
    uDebugPrt( "(uSerial &)0x%p.uAcceptPause, mask:0x%x,0x%x,0x%x,0x%x, owner:0x%p\n",
             this, uMask[0], uMask[1], uMask[2], uMask[3], uMutexOwner );
#endif // __U_DEBUG_H__
    // lock is acquired at beginning of accept statement
    uBaseTask &t = uThisTask();                       // optimization
    uLastAcceptor = &t;                         // saving the acceptor thread of a rendezvous
    uAcceptSignalled.uAdd( &(t.uMutexRef) );          // suspend current task on top of accept/signalled stack
    uMutexOwner = NULL;
    if ( uEntryList.uExecuteHooks && uCheckHookConditions( &t ) ) {
      uEntryList.uOnRelease( t );
    } // if
    uSCHEDULE( &lock );                         // find someone else to execute; release lock on kernel stack
    if ( t.uAcceptedCall ) {                    // accepted entry is suspended if true
        t.uAcceptedCall->uAcceptorSuspended = false;  // acceptor resumes
      t.uAcceptedCall = NULL;
    } // if
    uEnable <uSerial::uFailure> {
      uEHM::uPoll();
    } // uEnable
} // uSerial::uAcceptPause

void uSerial::uAcceptPause( uDuration duration ) {
    uAcceptPause( uActiveProcessorKernel->uKernelClock.uGetTime() + duration );
} // uSerial::uAcceptPause

void uSerial::uAcceptPause( uTime time ) {
    uBaseTask &t = uThisTask();                       // optimization
#ifdef __U_DEBUG_H__
    uDebugPrt( "(uSerial &)0x%p.uAcceptPause, mask:0x%x,0x%x,0x%x,0x%x, owner:0x%p\n",
             this, uMask[0], uMask[1], uMask[2], uMask[3], uMutexOwner );
#endif // __U_DEBUG_H__
    uTimeoutHndlr handler( t, *this );                // handler to wake up blocking task

    uTimeoutEvent.uExecuteLocked = true;
    uTimeoutEvent.timerT = time;
    uTimeoutEvent.uWho = &t;
    uTimeoutEvent.SigHandler = &handler;

    uProc = &uThisProcessor();
    uEvents = uProc->uEvents;
    uEvents->uAddEvent( uTimeoutEvent, *uProc );

    // lock is acquired at beginning of accept statement
    uBaseTask &uCallingTask = t;                // optimization
    uLastAcceptor = &uCallingTask;              // saving the acceptor thread of a rendezvous
    uAcceptSignalled.uAdd( &(uCallingTask.uMutexRef) );     // suspend current task on top of accept/signalled stack

    uMutexOwner = NULL;
    if ( uEntryList.uExecuteHooks && uCheckHookConditions( &uCallingTask ) ) {
      uEntryList.uOnRelease( uCallingTask );
    } // if
    uSCHEDULE( &lock );                         // find someone else to execute; release lock on kernel stack

    uEvents->uRemoveEvent( uTimeoutEvent, *uProc );

    if ( uCallingTask.uAcceptedCall ) {               // accepted entry is suspended if true
        uCallingTask.uAcceptedCall->uAcceptorSuspended = false;   // acceptor resumes
      uCallingTask.uAcceptedCall = NULL;
    } // if
    uEnable <uSerial::uFailure> {
      uEHM::uPoll();
    } // uEnable
} // uSerial::uAcceptPause
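
// Illustrative sketch: the timed variants above support an accept statement
// with a timeout clause, e.g. (syntax approximate for this uC++ version):
//
//   _Accept( mem1 );
//   or _Timeout( duration );                    // stop waiting after "duration"
//
// The duration form converts to an absolute time on the kernel clock and
// registers uTimeoutEvent, so the blocked acceptor is woken by uTimeoutHndlr
// if no call arrives in time.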

void uSerial::uRemoveTimeout() {
    if ( uEvents != NULL ) {
      uEvents->uRemoveEvent( uTimeoutEvent, *uProc );
    } // if
} // uSerial::uRemoveTimeout

void uSerial::uAcceptEnd() {
    uMutexMaskPosn = NULL;

    if ( uThisTask().uProfileActive && uProfiler::uProfiler_RegisterAcceptEnd ) { // task registered for profiling ?              
      (*uProfiler::uProfiler_RegisterAcceptEnd)( uProfiler::uProfilerInstance, *this, uThisTask() );
    } // if
} // uSerial::uAcceptEnd


uSerial::uProtectAcceptStmt::uProtectAcceptStmt(uSerial &s) : s(s) {
    s.uAcceptStart( uMutexMaskPosn );
} // uSerial::uProtectAcceptStmt::uProtectAcceptStmt

uSerial::uProtectAcceptStmt::uProtectAcceptStmt(uSerial &s, bool ) : s(s) {
    s.uRemoveTimeout();
    s.uAcceptStart( uMutexMaskPosn );
} // uSerial::uProtectAcceptStmt::uProtectAcceptStmt

uSerial::uProtectAcceptStmt::~uProtectAcceptStmt() {
    s.uAcceptEnd();
} // uSerial::uProtectAcceptStmt::~uProtectAcceptStmt


uSerial::uFailure::uFailure( const uSerial *const serial, const char *const msg ) : uKernelFailure( msg ), serial( serial ) {}

uSerial::uFailure::uFailure( const char *const msg ) : uKernelFailure( msg ), serial( NULL ) {}

uSerial::uFailure::~uFailure() {}

const uSerial *const uSerial::uFailure::uSerialID() { return serial; }

void uSerial::uFailure::defaultTerminate() const {
    uAbort( "Unhandled exception of type uSerial::uFailure : %.256s", message() );
} // uSerial::uFailure::defaultTerminate


uSerial::uEntryFailure::uEntryFailure( const uSerial *const serial, const char *const msg ) : uSerial::uFailure( serial, msg ) {}

uSerial::uEntryFailure::uEntryFailure( const char *const msg ) : uSerial::uFailure( msg ) {}

uSerial::uEntryFailure::~uEntryFailure() {}

void uSerial::uEntryFailure::defaultTerminate() const {
    uAbort( "Unhandled exception of type uSerial::uEntryFailure : while executing mutex destructor, task %.256s (0x%p) found %.256s.",
          sourceName(), &source(), message() );
} // uSerial::uEntryFailure::defaultTerminate


uSerial::uRendezvousFailure::uRendezvousFailure( const uSerial *const serial_id, const char *const msg ) : uSerial::uFailure( serial_id, msg ), caller( &uThisCoroutine() ) {}

uSerial::uRendezvousFailure::~uRendezvousFailure() {}

const uBaseCoroutine *const uSerial::uRendezvousFailure::uCaller() { return caller; }

void uSerial::uRendezvousFailure::defaultTerminate() const {
    uAbort( "Unhandled exception of type uSerial::uRendezvousFailure : %.256s from task %.256s (0x%p) to mutex member of task %.256s (0x%p).",
          message(), sourceName(), &source(), uThisTask().uGetName(), &uThisTask() );
} // uSerial::uRendezvousFailure::defaultTerminate


uInitEvent(uSerial::uFailure);
uInitEvent(uSerial::uEntryFailure);
uInitEvent(uSerial::uRendezvousFailure);


uTaskConstructor::uTaskConstructor( uAction f, uSerial &s, uBaseTask &t, uBasePIQ &piq, const char *n, bool profile ) {
    if ( f == uYes ) {
      t.uStartHere( (void (*)( uMachContext & ))uMachContext::uInvokeTask );
      t.uName = n;
      t.serial = &s;                            // set task's serial instance
      t.uProfileActive = profile;
      t.uPIQ = &piq;

      if ( t.uProfileActive && uProfiler::uProfiler_RegisterTask ) { // profiling this task & task registered for profiling ? 
          (*uProfiler::uProfiler_RegisterTask)( uProfiler::uProfilerInstance, t, s, uThisTask() );
      } // if

      s.uAcceptSignalled.uAdd( &(t.uMutexRef) );

#if __U_LOCALDEBUGGER_H__
      if ( uLocalDebugger::uLocalDebuggerActive ) uLocalDebugger::uLocalDebuggerInstance->checkPoint();
#endif // __U_LOCALDEBUGGER_H__

      t.uCurrCluster->uTaskAdd( t );                  // add task to the list of tasks on this cluster
    } // if
} // uTaskConstructor::uTaskConstructor
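
// Note: uTaskConstructor, uTaskDestructor and uTaskMain are not called directly
// by user code; the uC++ translator inserts them around a task's constructor,
// destructor and main routine. Roughly (illustrative only):
//
//   _Task Worker { void main() { /* task body */ } };   // user code
//   // generated: constructor body wrapped by uTaskConstructor, main body by
//   // uTaskMain, destructor body by uTaskDestructor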


uTaskDestructor::uTaskDestructor( uAction f, uBaseTask &t ) : f( f ), t( t ) {
} // uTaskDestructor::uTaskDestructor

uTaskDestructor::~uTaskDestructor() {
    if ( f == uYes ) {
#ifdef __U_DEBUG__
      if ( t.uBaseCoroutine::uGetState() != uBaseCoroutine::uHalt ) {
          uAbort( ": attempt to delete task %.256s (0x%p) that is not halted.\n"
                "Possible cause is task blocked on a condition queue.",
                t.uGetName(), &t );
      } // if
#endif // __U_DEBUG__

#if __U_LOCALDEBUGGER_H__
      if ( uLocalDebugger::uLocalDebuggerActive ) uLocalDebugger::uLocalDebuggerInstance->checkPoint();
#endif // __U_LOCALDEBUGGER_H__

      t.uProfileActive = false;
      if ( t.uProfileSamplerInstance && uProfiler::uProfiler_DeregisterTask ) { // task registered for profiling ?
          (*uProfiler::uProfiler_DeregisterTask)( uProfiler::uProfilerInstance, t );
      } // if

      t.uCurrCluster->uTaskRemove( t );         // remove the task from the list of tasks that live on this cluster.
    } // if
} // uTaskDestructor::~uTaskDestructor


uTaskMain::uTaskMain( uBaseTask &t ) : t( t ) {
    // SKULLDUGGERY: To allow "main" to be treated as a normal member routine,
    // a counter is used to allow recursive entry.

    t.uRecursion += 1;
    if ( t.uRecursion == 1 ) {                        // first call ?
      if ( t.uProfileActive && uProfiler::uProfiler_RegisterTaskStartExecution ) { 
          (*uProfiler::uProfiler_RegisterTaskStartExecution)( uProfiler::uProfilerInstance, t ); 
      } // if
#if __U_LOCALDEBUGGER_H__
      // Registering a task with the global debugger must occur in this
      // routine for the register set to be correct.

      if ( uLocalDebugger::uLocalDebuggerActive ) {
          uLocalDebugger::uLocalDebuggerInstance->createULThread();
      } // if
#endif // __U_LOCALDEBUGGER_H__
    } // if
} // uTaskMain::uTaskMain


uTaskMain::~uTaskMain() {
    t.uRecursion -= 1;
    if ( t.uRecursion == 0 ) {
      if ( t.uProfileActive && uProfiler::uProfiler_RegisterTaskEndExecution ) {
          (*uProfiler::uProfiler_RegisterTaskEndExecution)( uProfiler::uProfilerInstance, t ); 
      } // if   
#if __U_LOCALDEBUGGER_H__
      if ( uLocalDebugger::uLocalDebuggerActive ) uLocalDebugger::uLocalDebuggerInstance->destroyULThread();
#endif // __U_LOCALDEBUGGER_H__
    } // if
} // uTaskMain::~uTaskMain


uSerialConstructor::uSerialConstructor( uAction f, uSerial &s ) : f( f ), s( s ) {
} // uSerialConstructor::uSerialConstructor

uSerialConstructor::uSerialConstructor( uAction f, uSerial &s, const char *n ) : f( f ), s( s ) {
    if ( f == uYes ) {
      if ( uThisTask().uProfileActive && uProfiler::uProfiler_RegisterMonitor ) { // task registered for profiling ?
          (*uProfiler::uProfiler_RegisterMonitor)( uProfiler::uProfilerInstance, s, n, uThisTask() );
      } // if
    } // if
} // uSerialConstructor::uSerialConstructor

uSerialConstructor::~uSerialConstructor() {
    if ( f == uYes ) {
      uBaseTask &t = uThisTask();               // optimization

      // There is no place to store the caller's mutex recursion value during
      // construction (normally stored in uSerialMember), so the following
      // trick is used. The caller's mutex-recursion counter is used during
      // construction even though it may already have a value, i.e., from
      // previous recursive calls. Any recursion during construction simply
      // adds to and eventually subtracts from this counter, but it always
      // returns to the initial calling value.  However, uLeave needs a value
      // of 0 to work correctly for exit from the constructor. Therefore, the
      // counter is set to 0 but the previous value is passed to uLeave to be
      // reset on exit.

      int mr = t.uMutexRecursion;               // save previous recursive count
      t.uMutexRecursion = 0;                    // reset recursive count
      s.uLeave( mr );
    } // if
} // uSerialConstructor::~uSerialConstructor
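
// For example (illustrative): if the caller enters construction with
// t.uMutexRecursion == 3, the counter is reset to 0 for the constructor body,
// any recursive mutex calls during construction raise and lower it back to 0,
// and uLeave( 3 ) then restores the caller's original count on exit.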


uSerialDestructor::uSerialDestructor( uAction f, uSerial &s, uBasePrioritySeq &ml, int mp ) : f( f ), s( s ) {
    if ( f == uYes ) {
      uBaseTask &t = uThisTask();               // optimization
#ifdef __U_DEBUG__
      nlevel = t.uCurrSerialLevel += 1;
#endif // __U_DEBUG__
      s.uEnterDestructor( mr, ml, mp );
      if ( ! s.uAcceptSignalled.uEmpty() ) {
          s.uLastAcceptor = NULL;
          uBaseTask &uCallingTask = t;          // optimization
          s.uMutexOwner = &(s.uAcceptSignalled.uDrop()->uGet());
          s.uAcceptSignalled.uAdd( &(uCallingTask.uMutexRef) ); // suspend terminating task on top of accept/signalled stack
          uSCHEDULE( s.uMutexOwner );                 // find someone else to execute; wake on kernel stack
      } // if
    } // if
} // uSerialDestructor::uSerialDestructor

uSerialDestructor::~uSerialDestructor() {
    if ( f == uYes ) {
      uBaseTask &t = uThisTask();               // optimization
      // Useful for dynamic allocation if an exception is thrown in the
      // destructor so the object can continue to be used and deleted again.
      if ( std::uncaught_exception() ) {
#ifdef __U_DEBUG__
          if ( nlevel != t.uCurrSerialLevel ) {
            uAbort(": attempt to perform a non-nested entry and exit from multiple accessed mutex objects.");         
          } // if
          t.uCurrSerialLevel -= 1;
#endif // __U_DEBUG__
          s.uLeave( mr );
      } else {
          if ( t.uProfileActive && uProfiler::uProfiler_DeregisterMonitor ) { // task registered for profiling ?
            (*uProfiler::uProfiler_DeregisterMonitor)( uProfiler::uProfilerInstance, s, t );
          } // if

          t.uMutexRecursion = mr;         // restore previous recursive count
      } // if
    } // if
} // uSerialDestructor::~uSerialDestructor

uSerialMember::uSerialMember( uSerial &s, uBasePrioritySeq &ml, int mp ) : s( s ) {
    uBaseTask &t = uThisTask();                 // optimization

    // There is a race condition between setting and testing this flag.
    // However, it is the best that can be expected because the mutex storage
    // is being deleted.

    if ( ! s.uAlive ) {                         // check against improper memory management
      uThrow uSerial::uEntryFailure( &s, "mutex object has been destroyed" );
    } // if

    if ( t.uProfileActive && uProfiler::uProfiler_RegisterMutexFunctionEntryTry ) { // task registered for profiling ?
      (*uProfiler::uProfiler_RegisterMutexFunctionEntryTry)( uProfiler::uProfilerInstance, s, t );
    } // if

    try {
      // Polling in uEnter happens after the value of mr is properly set;
      // therefore, in the catch clause, mr can be used to restore the
      // state of the uSerial object.

#ifdef __U_DEBUG__
      nlevel = t.uCurrSerialLevel += 1;
#endif // __U_DEBUG__
      s.uEnter( mr, ml, mp );
      acceptor = s.uLastAcceptor;
      uAcceptorSuspended = acceptor != NULL;
      if ( uAcceptorSuspended ) {
          acceptor->uAcceptedCall = this;
      } // if
      s.uLastAcceptor = NULL;                   // avoid messing up subsequent mutex method invocation
    } catch( ... ) {
#ifdef __U_DEBUG__
      if ( nlevel != t.uCurrSerialLevel ) {
          uAbort(": attempt to perform a non-nested entry and exit from multiple accessed mutex objects.");     
      } // if
      t.uCurrSerialLevel -= 1;
#endif // __U_DEBUG__
      if ( s.uLastAcceptor ) {
          s.uLastAcceptor->uAcceptedCall = NULL;      // the rendezvous did not materialize
          uRaise uSerial::uRendezvousFailure( &s, "accepted call fails" ) uAt *s.uLastAcceptor->uCurrCoroutine; // acceptor is not initialized
          s.uLastAcceptor = NULL;
      } // if
      s.uLeave( mr );                           // look at ~uSerialMember()
      uThrow;
    } // try

    noUserOverride = true;

    if ( t.uProfileActive && uProfiler::uProfiler_RegisterMutexFunctionEntryDone ) { // task registered for profiling ?
      (*uProfiler::uProfiler_RegisterMutexFunctionEntryDone )( uProfiler::uProfilerInstance, s, t );
    } // if
} // uSerialMember::uSerialMember

uSerialMember::~uSerialMember() {
    uBaseTask &t = uThisTask();                       // optimization
#ifdef __U_DEBUG__
    if ( nlevel != t.uCurrSerialLevel ) {
      uAbort(": attempt to perform a non-nested entry and exit from multiple accessed mutex objects.");         
    } // if
    t.uCurrSerialLevel -= 1;
#endif // __U_DEBUG__
    if ( acceptor ) {
      acceptor->uAcceptedCall = NULL;                 // accepted mutex member terminates
      // raise a concurrent exception at the acceptor
      if ( std::uncaught_exception() && noUserOverride && s.uAlive && uAcceptorSuspended ) {
          uRaise uSerial::uRendezvousFailure( &s, "accepted call fails" ) uAt *uAcceptor();
      } // if
    } // if

    if ( t.uProfileActive && uProfiler::uProfiler_RegisterMutexFunctionExit ) { // task registered for profiling ?
      (*uProfiler::uProfiler_RegisterMutexFunctionExit)( uProfiler::uProfilerInstance, s, t );
    } // if

    s.uLeave( mr );
} // uSerialMember::~uSerialMember

uBaseCoroutine *uSerialMember::uAcceptor() {
    noUserOverride = false;
    return ( s.uAlive && uAcceptorSuspended ) ? acceptor->uCurrCoroutine : NULL;
} // uSerialMember::uAcceptor
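
// Note: calling uAcceptor() clears noUserOverride, which suppresses the
// automatic uRendezvousFailure raised in ~uSerialMember when the mutex member
// terminates with an uncaught exception; presumably the user code that
// requested the acceptor then handles any notification itself.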


uCondition::uCondition() : owner( NULL ) {
} // uCondition::uCondition

uCondition::~uCondition() {
    // A uCondition object must be destroyed before its owner.  Concurrent
    // execution of the destructor for a uCondition and its owner is
    // unacceptable.  The flag owner->uAlive indicates whether a mutex object
    // has been destroyed, but it cannot protect against concurrent execution.
    // As long as uCondition objects are declared inside their owner mutex
    // object, the proper order of destruction is guaranteed.

    if ( ! uCondQueue.uEmpty() ) {
      // wake each task blocked on the condition with an async event
      for ( ;; ) {
          uBaseTaskDL *p = uCondQueue.uHead();  // get the task blocked at the start of the condition
          if ( p == NULL ) break;               // list empty ?
          uEHM::uDeliverAEStack dummy( false ); // block all async exceptions in destructor
          uBaseTask &t = p->uGet();
          uThrow uWaitingFailure( this, "found blocked task on condition variable during deletion" ) uAt *(t.uCurrCoroutine); // throw async event at blocked task
          uSBlock( *owner );                    // restart (signal) the blocked task
      } // for
    } // if
} // uCondition::~uCondition
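
// Illustrative sketch: a condition variable is normally declared inside the
// mutex object that owns it, so it is destroyed before its owner:
//
//   _Mutex class BoundedBuffer {
//       uCondition nonEmpty;                    // declared inside its owner
//     public:
//       void insert( int elem );                // signals nonEmpty
//       int remove();                           // waits on nonEmpty when buffer is empty
//   };
//
// The translator supplies the uSerial argument when it maps the user-level
// wait/signal operations onto uW/uS/uSBlock below (user-level spellings may
// differ in this uC++ version).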

bool uCondition::empty() const {                // test for tasks on a condition
    return uCondQueue.uEmpty();                       // check if the condition queue is empty
} // uCondition::empty

long int uCondition::front() const {                  // return task information
#ifdef __U_DEBUG__
    if ( uCondQueue.uEmpty() ) {                // condition queue must not be empty
      uAbort( ": attempt to access user data on an empty condition.\n"
            "Possible cause is not checking if the condition is empty before reading stored data." );
    } // if
#endif // __U_DEBUG__
    return uCondQueue.uHead()->uGet().uInfo;          // return condition information stored with blocked task
} // uCondition::front
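
// Illustrative sketch: the signaller can use the stored information to decide
// whether to service a waiter, guarding against an empty condition first:
//
//   if ( ! cond.empty() && cond.front() == HIGH_PRIORITY ) { /* signal cond */ }
//
// where HIGH_PRIORITY is a hypothetical application-defined value stored via
// the information form of wait below.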


void uCondition::uW( uSerial &s ) {             // wait on a condition
    uBaseTask &t = uThisTask();                       // optimization
#ifdef __U_DEBUG__
    if ( &t != s.uMutexOwner ) {                // must have mutex lock to wait
      uAbort( ": attempt to wait on a condition variable in a mutex object not locked by this task.\n"
            "Possible cause is waiting on a condition variable in a non-mutex member routine." );
    } // if
#endif // __U_DEBUG__
    if ( owner != &s ) {                        // only owner can use condition
      if ( owner == NULL ) {                    // owner exist ?
          owner = &s;                           // set condition owner
#ifdef __U_DEBUG__
      } else {
          uAbort( ": attempt to wait on a condition variable not owned by this mutex object.\n"
                "Possible cause is passing the condition variable outside of the mutex object in which it is created but after it has been used." );
#endif // __U_DEBUG__
      } // if
    } // if
    if ( t.uProfileActive && uProfiler::uProfiler_RegisterWait ) { // task registered for profiling ?
      (*uProfiler::uProfiler_RegisterWait)( uProfiler::uProfilerInstance, *this, t, s );
    } // if

    uCondQueue.uAdd( &(t.uMutexRef) );                // add to end of condition queue

    uEnable <uSerial::uFailure> {               // the list of enabled exceptions has to be determined
        s.uLeave2();                            // release mutex and let it schedule another task
      uEHM::uPoll();                            // uLeave2 does not poll for async exceptions
    } // uEnable

    if ( t.uProfileActive && uProfiler::uProfiler_RegisterReady ) { // task registered for profiling ?
      (*uProfiler::uProfiler_RegisterReady)( uProfiler::uProfilerInstance, *this, t, s );
    } // if
} // uCondition::uW

void uCondition::uW( uSerial &s, long int info ) {    // wait on a condition with information
    uThisTask().uInfo = info;                   // store the information with this task
    uW( s );                                    // wait on this condition
} // uCondition::uW

void uCondition::uS( uSerial &s ) {             // signal a condition
    if ( ! uCondQueue.uEmpty() ) {
      uBaseTask &t = uThisTask();               // optimization
#ifdef __U_DEBUG__
      if ( &t != s.uMutexOwner ) {              // must have mutex lock to signal
          uAbort( ": attempt to signal a condition variable in a mutex object not locked by this task.\n"
                "Possible cause is signalling a condition variable in a non-mutex member routine." );
      } // if
      if ( owner != &s ) {                      // only owner can use condition
          uAbort( ": attempt to signal a condition variable not owned by this mutex object.\n"
                "Possible cause is passing the condition variable outside of the mutex object in which it is created but after it has been used." );
      } // if
#endif // __U_DEBUG__
      if ( t.uProfileActive && uProfiler::uProfiler_RegisterSignal ) { // task registered for profiling ?
          (*uProfiler::uProfiler_RegisterSignal)( uProfiler::uProfilerInstance, *this, t, s );
      } // if

      s.uAcceptSignalled.uAdd( uCondQueue.uDrop() );  // move signalled task on top of accept/signalled stack
    } // if
} // uCondition::uS

void uCondition::uSBlock( uSerial &s ) {        // signal a condition
    if ( ! uCondQueue.uEmpty() ) {
      uBaseTask &t = uThisTask();               // optimization
#ifdef __U_DEBUG__
      if ( &t != s.uMutexOwner ) {              // must have mutex lock to signal
          uAbort( ": attempt to signal a condition variable in a mutex object not locked by this task.\n"
                "Possible cause is signalling a condition variable in a non-mutex member routine." );
      } // if
      if ( owner != &s ) {                      // only owner can use condition
          uAbort( ": attempt to signal a condition variable not owned by this mutex object.\n"
                "Possible cause is passing the condition variable outside of the mutex object in which it is created but after it has been used." );
      } // if
#endif // __U_DEBUG__
      if ( t.uProfileActive && uProfiler::uProfiler_RegisterSignal ) { // task registered for profiling ?
          (*uProfiler::uProfiler_RegisterSignal)( uProfiler::uProfilerInstance, *this, t, s );
      } // if
      if ( t.uProfileActive && uProfiler::uProfiler_RegisterWait ) { // task registered for profiling ?
          (*uProfiler::uProfiler_RegisterWait)( uProfiler::uProfilerInstance, *this, t, s );
      } // if

      s.uAcceptSignalled.uAdd( &(t.uMutexRef) );      // suspend signaller task on accept/signalled stack
      s.uAcceptSignalled.uAddHead( uCondQueue.uDrop() ); // move signalled task on head of accept/signalled stack
      s.uLeave2();                              // release mutex and let it schedule the signalled task

      if ( t.uProfileActive && uProfiler::uProfiler_RegisterReady ) { // task registered for profiling ?
          (*uProfiler::uProfiler_RegisterReady)( uProfiler::uProfilerInstance, *this, t, s );
      } // if
    } // if
} // uCondition::uSBlock
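
// Note the difference between the two signalling forms above: uS moves the
// signalled task onto the accept/signalled stack and the signaller continues
// inside the mutex object, whereas uSBlock also suspends the signaller behind
// the signalled task (via uLeave2), so the signalled task resumes in the mutex
// object first.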


uCondition::uWaitingFailure::uWaitingFailure( const uCondition *const cond, const char *const msg ) : uSerial::uFailure( cond->owner, msg ), cond( cond ) {}

uCondition::uWaitingFailure::~uWaitingFailure() {}

const uCondition *const uCondition::uWaitingFailure::uConditionID() const { return cond; }

void uCondition::uWaitingFailure::defaultTerminate() const {
    uAbort( "Unhandled exception of type uCondition::uWaitingFailure : task %.256s (0x%p) found blocked task %.256s (0x%p) on condition variable 0x%p during deletion.",
          sourceName(), &source(), uThisTask().uGetName(), &uThisTask(), uConditionID() );
} // uCondition::uWaitingFailure::defaultTerminate

uInitEvent(uCondition::uWaitingFailure);


//######################### uMain #########################


uMain::uMain( uCluster &cluster, int size, int argc, char *argv[], int &retcode ) :
      uBaseTask( cluster, size ), argc( argc ), argv( argv ), uRetCode( retcode ) {
    // task uMain is always profiled when the profiler is active
    uProfileActivate( *uKernelModule::uTaskBoot );    // make boot task the parent
} // uMain::uMain


uMain::~uMain() {
    if ( uKernelModule::uTaskSystem->pthreadDetach ) {
      uKernelModule::uTaskSystem->pthreadCheck();
    } // if 
} // uMain::~uMain


//######################### Kernel Boot #########################


#if defined( __irix__ ) && ! defined( __U_MULTI__ )
void uNullFunc( void *, unsigned int ) {
    // this function exists only to give the dummy sproc somewhere to go
}
#endif // __irix__ && ! __U_MULTI__


void uKernelBoot::startup() {
    if ( ! uKernelModule::uKernelModuleInitialized ) {
      uKernelModule::startup();
    } // if

#ifdef __U_DEBUG__
    // Storage allocated before the start of uC++ is normally not freed until
    // after uC++ completes (if at all). Hence, this storage is not considered
    // when calculating unfreed storage when the heap's destructor is called in
    // finishup.

    if ( uHeapManager::uHeapManagerInstance == NULL ) {
      uHeapManager::boot();
    } // if
    uHeapManager::PreAlloc = uHeapManager::uHeapManagerInstance->uCheckFree();
#endif // __U_DEBUG__

    // create kernel locks

    uKernelModule::uGlobalAbortLock = new uSpinLock;
    uKernelModule::uGlobalProcessorLock = new uSpinLock;
    uKernelModule::uGlobalClusterLock = new uSpinLock;

#ifdef __U_DEBUG_H__
    uDebugPrt( "uKernelBoot::startup1, uDisableInt:%d, uDisableIntCnt:%d\n",
             THREAD_GETMEM( uDisableInt ), THREAD_GETMEM( uDisableIntCnt ) );
#endif // __U_DEBUG_H__

    // initialize kernel signal handlers

    uSigHandlerModule();

    // create global lists

    uKernelModule::uGlobalProcessors = new uProcessorSeq;
    uKernelModule::uGlobalClusters = new uClusterSeq;

    // SKULLDUGGERY: Initialize the global pointers with the appropriate memory
    // locations and then "new" the cluster and processor. Because the global
    // pointers are initialized, all the references through them in the cluster
    // and processor constructors work out. (HA!)

    uKernelModule::uSystemProcessor = (uProcessor *)&uKernelModule::uSystemProcessorStorage;
    uKernelModule::uSystemCluster = (uCluster *)&uKernelModule::uSystemClusterStorage;
    uKernelModule::uSystemScheduler = new uDefaultScheduler;

#if defined( __irix__ )
#ifdef __U_MULTI__
    // Irix has a default limit of 8 KTs (sprocs) in the default share group.
    // This must be changed *before* the first call to sproc or sprocsp.  There
    // can be at most 10000 in a share group.
    usconfig( CONF_INITUSERS, 256 );
#else // ! __U_MULTI__
    // SKULLDUGGERY: libc only provides locking around standard library functions
    // after sproc has been called.  Since there would otherwise be no calls to
    // sproc in a uniprocessor program, make a dummy call here.  uNullFunc returns
    // immediately, so the sproc created here has a short life.

    // Asking sproc to allocate a stack seems to be an expensive operation.
    #define U_DUMMY_STACK_SIZE 4000
    static char dummy_stack[ U_DUMMY_STACK_SIZE ];
    pid_t pid = sprocsp( uNullFunc, PR_SADDR | PR_SFDS | PR_SUMASK, NULL, dummy_stack + U_DUMMY_STACK_SIZE - 8, U_DUMMY_STACK_SIZE );
    if ( pid == -1 ) {
      uAbort( "uKernelBoot::startup, sprocsp, internal error, error(%d) %s.", errno, strerror( errno ) );
    } // if

    // uProcWait and uSigChldHandler race to wait on the pid for normal
    // termination so the loser ignores the ECHILD.
    int code = waitpid( pid, NULL, 0 );
    if ( code == -1 && errno != ECHILD ) {
      uAbort( "uKernelBoot::startup, waitpid, internal error, error(%d) %s.", errno, strerror( errno ) );
    } // if
#endif // ! __U_MULTI__
#endif // __irix__

    // create system cluster: it is at a fixed address so storing the result is unnecessary.

    new((void *)&uKernelModule::uSystemClusterStorage) uCluster( *uKernelModule::uSystemScheduler, uDefaultStackSize(), "uSystemCluster" );
    new((void *)&uKernelModule::uSystemProcessorStorage) uProcessor( *uKernelModule::uSystemCluster, 1.0 );
#ifndef __U_MULTI__
    uProcessor::uEvents = new uEventList;
    uProcessor::uContextSwitchHandler = new uCxtSwtchHndlr;
    uProcessor::uContextEvent = new uEventNode( *uProcessor::uContextSwitchHandler );
    uCluster::NBIO = new uNBIO;
#endif // ! __U_MULTI__

    // create processor kernel

    THREAD_SETMEM( uProcessorKernelStorage, new uProcessorKernel );

    // set thread register in the new context

#if defined( __U_MULTI__ ) && defined( __U_SWAPCONTEXT__ )
#if defined( __linux__ ) && defined( __i386__ )
    ((ucontext_t *)THREAD_GETMEM(uProcessorKernelStorage)->uStorage)->uc_mcontext.gregs[REG_GS] = THREAD_GETMEM( ldtValue );
#elif defined( __linux__ ) && defined( __ia64__ )
    ((ucontext_t *)THREAD_GETMEM(uProcessorKernelStorage)->uStorage)->uc_mcontext.sc_gr[13] = (unsigned long)THREAD_GETMEM( threadPointer );
#elif defined( __solaris__ ) && defined( __sparc__ )
    ((ucontext_t *)THREAD_GETMEM(uProcessorKernelStorage)->uStorage)->uc_mcontext.gregs[REG_G7] = (int)(THREAD_GETMEM( uSelf ) );
#elif defined( __irix__ ) && defined( __mips__ )
    // No thread register => nothing needs to be set in the mcontext
#else
    #error uC++ internal error : unsupported architecture
#endif
#endif // __U_MULTI__ && __U_SWAPCONTEXT__

    // start boot task, which executes the global constructors and destructors

    uKernelModule::uTaskBoot = new((void *)&uKernelModule::uTaskBootStorage) uBootTask();

    // SKULLDUGGERY: Set the processor's last resumer to the boot task so it
    // returns to it when the processor coroutine terminates. This has to be
    // done explicitly because the kernel coroutine is never uResumed, only
    // context switched to. Therefore, uLast is never set by uResume and
    // subsequently copied to uStart.

    uActiveProcessorKernel->uLast = uKernelModule::uTaskBoot;

    // SKULLDUGGERY: Force a context switch to the system processor to set the
    // boot task's context to the current UNIX context. Hence, the boot task
    // does not begin through uInvoke, like all other tasks. It also starts the
    // system processor's companion task so that it is ready to receive
    // processor-specific requests. The trick here is that uBootTask is on the
    // ready queue when this call is made. Normally, a task is not on a ready
    // queue when it is running. As a result, there has to be a special check
    // in uSchedule so it does not assert that this task is NOT on a ready
    // queue when it starts the context switch.

    uThisTask().uYield();

    // create system task

    uKernelModule::uTaskSystem = new uSystemTask();


    // THE SYSTEM IS NOW COMPLETELY RUNNING


    // Obtain the addresses of the original set_terminate and set_unexpected
    // using the dynamic loader. Use the original versions to initialize the
    // hidden variables holding the terminate and unexpected routines. All
    // other references to set_terminate and set_unexpected refer to the uC++
    // ones.

    char *error;
    void *library;
#if defined( RTLD_NEXT )
    library = RTLD_NEXT;
#else
    // missing RTLD_NEXT => must hard-code library name
    library = dlopen( "libstdc++.so", RTLD_LAZY );
    if ( (error = dlerror()) != NULL ) {
      fprintf( stderr, "uKernelBoot::startup : internal error, %s\n", error );
      exit( -1 );
    } // if
#endif // RTLD_NEXT
    std::terminate_handler (*orig_set_terminate)( std::terminate_handler );
    orig_set_terminate = (std::terminate_handler (*)(std::terminate_handler))dlsym( library, "_ZSt13set_terminatePFvvE" );
    if ( (error = dlerror()) != NULL ) {
      fprintf( stderr, "uKernelBoot::startup : internal error, %s\n", error );
      exit( -1 );
    } // if
    orig_set_terminate( uEHM::uTerminateHandler );

    std::unexpected_handler (*orig_set_unexpected)( std::unexpected_handler );
    orig_set_unexpected = (std::unexpected_handler (*)(std::unexpected_handler))dlsym( library, "_ZSt14set_unexpectedPFvvE" );
    if ( (error = dlerror()) != NULL ) {
      fprintf( stderr, "uKernelBoot::startup : internal error, %s\n", error );
      exit( -1 );
    } // if
    orig_set_unexpected( uEHM::uUnexpectedHandler );

    // create user cluster

    uKernelModule::uUserCluster = new uCluster( "uUserCluster" );

    // create user processor

    uKernelModule::uUserProcessor = new uProcessor( *uKernelModule::uUserCluster );

    // uOwnerLock has a runtime check testing if locking is attempted from
    // inside the kernel. This check only applies once the system becomes
    // concurrent. During the previous boot-strapping code, some locks may be
    // invoked (and hence a runtime check would occur) but the system is not
    // concurrent. Hence, these locks are always open and no blocking can
    // occur. This flag enables uOwnerLock checking after this point.

    uKernelModule::uInitialization = true;

#ifdef __U_DEBUG_H__
    uDebugPrt( "uKernelBoot::startup2, uDisableInt:%d, uDisableIntCnt:%d, uPreemption:%d\n",
             THREAD_GETMEM( uDisableInt ), THREAD_GETMEM( uDisableIntCnt ), uThisProcessor().uGetPreemption() );
#endif // __U_DEBUG_H__

    uKernelModule::uTaskBoot->uMigrate( *uKernelModule::uUserCluster );

#ifdef __U_DEBUG_H__
    uDebugPrt( "uKernelBoot::startup3, uDisableInt:%d, uDisableIntCnt:%d, uPreemption:%d\n",
             THREAD_GETMEM( uDisableInt ), THREAD_GETMEM( uDisableIntCnt ), uThisProcessor().uGetPreemption() );
#endif // __U_DEBUG_H__
} // uKernelBoot::startup


void uKernelBoot::finishup() {
#ifdef __U_DEBUG_H__
    uDebugPrt( "uKernelBoot::finishup1, uDisableInt:%d, uDisableIntCnt:%d, uPreemption:%d\n",
             THREAD_GETMEM( uDisableInt ), THREAD_GETMEM( uDisableIntCnt ), uThisProcessor().uGetPreemption() );
#endif // __U_DEBUG_H__

    uKernelModule::uTaskBoot->uMigrate( *uKernelModule::uSystemCluster );

#ifdef __U_DEBUG_H__
    uDebugPrt( "uKernelBoot::finishup2, uDisableInt:%d, uDisableIntCnt:%d, uPreemption:%d\n",
             THREAD_GETMEM( uDisableInt ), THREAD_GETMEM( uDisableIntCnt ), uThisProcessor().uGetPreemption() );
#endif // __U_DEBUG_H__

    delete uKernelModule::uUserProcessor;
    delete uKernelModule::uUserCluster;

    delete uKernelModule::uTaskSystem;
    
    // Turn off uOwnerLock checking.

    uKernelModule::uInitialization = false;

    THREAD_GETMEM( uSelf )->uDisableInterrupts();
    uThisProcessor().uSetContextSwitchEvent( 0 );     // clear the alarm on this processor
    THREAD_GETMEM( uSelf )->uEnableInterrupts();

#ifndef __U_MULTI__
    delete uCluster::NBIO;
    delete uProcessor::uContextEvent;
    delete uProcessor::uContextSwitchHandler;
    delete uProcessor::uEvents;
#endif // ! __U_MULTI__

    // SKULLDUGGERY: The termination order for the boot task is different from
    // the starting order. This results from the fact that the boot task must
    // have a processor before it can start. However, the system processor must
    // have the thread from the boot task to terminate. Deleting the cluster
    // first requires that the boot task first be removed from the list of
    // tasks on the cluster, or the cluster complains about unfinished
    // tasks. The boot task must be added again before its deletion because it
    // removes itself from the list. Also, when the boot task is being deleted
    // it is using the *already* deleted system cluster, which works only
    // because the system cluster storage is not dynamically allocated so the
    // storage is not scrubbed; therefore, it still has the necessary state
    // values to allow the boot task to be deleted. As well, the ready queue
    // has to be allocated separately from the system cluster so it can be
    // deleted *after* the boot task is deleted, because the boot task accesses
    // the ready queue during its deletion.

    // remove the boot task so the cluster does not complain
    uKernelModule::uSystemCluster->uTaskRemove( *(uBaseTask *)uKernelModule::uTaskBoot );

    // remove system processor, processor task and cluster
    uKernelModule::uSystemProcessor->uProcessor::~uProcessor();
    uKernelModule::uSystemCluster->uCluster::~uCluster();

    // remove processor kernel coroutine with execution still pending
    delete THREAD_GETMEM( uProcessorKernelStorage );

    // add the boot task back so it can remove itself from the list
    uKernelModule::uSystemCluster->uTaskAdd( *(uBaseTask *)uKernelModule::uTaskBoot );
    ((uBootTask *)uKernelModule::uTaskBoot)->uBootTask::~uBootTask();

    // Clean up storage associated with the boot task's pthread-like
    // thread-specific data (see ~uTaskMain). This must occur *after* all calls
    // that might call std::uncaught_exception, otherwise the exception
    // data-structures are created again for the task. (Note: the ~uSerialX
    // members call std::uncaught_exception.)

    if ( ((uBootTask *)uKernelModule::uTaskBoot)->pthreadData != NULL ) {
      _pthread_deletespecific( ((uBootTask *)uKernelModule::uTaskBoot)->pthreadData );
    } // if

    // no tasks on the ready queue so it can be deleted
    delete uKernelModule::uSystemScheduler;

#ifdef __U_DEBUG_H__
    uDebugPrt( "uKernelBoot::finishup3, uDisableInt:%d, uDisableIntCnt:%d, uPreemption:%d\n",
             THREAD_GETMEM( uDisableInt ), THREAD_GETMEM( uDisableIntCnt ), uThisProcessor().uGetPreemption() );
#endif // __U_DEBUG_H__

    delete uKernelModule::uGlobalClusters;
    delete uKernelModule::uGlobalProcessors;

    delete uKernelModule::uGlobalClusterLock;
    delete uKernelModule::uGlobalProcessorLock;
    delete uKernelModule::uGlobalAbortLock;

    // Explicitly invoking the destructor does not close down the heap because
    // it might still be used before the application terminates. The heap's
    // destructor does check for unreleased storage at this point. (The
    // constructor for the heap is called on the first call to malloc.)

    uHeapManager::uHeapManagerInstance->uHeapManager::~uHeapManager();
} // uKernelBoot::finishup


// Local Variables: //
// compile-command: "gmake install" //
// End: //
