
//                              -*- Mode: C++ -*- 
// 
// uC++ Version 5.0.1, Copyright (C) Peter A. Buhr 1994
// 
// uProcessor.cc -- 
// 
// Author           : Peter A. Buhr
// Created On       : Mon Mar 14 17:39:15 1994
// Last Modified By : Peter A. Buhr
// Last Modified On : Tue Sep  7 14:52:42 2004
// Update Count     : 1568
//
// This  library is free  software; you  can redistribute  it and/or  modify it
// under the terms of the GNU Lesser General Public License as published by the
// Free Software  Foundation; either  version 2.1 of  the License, or  (at your
// option) any later version.
// 
// This library is distributed in the  hope that it will be useful, but WITHOUT
// ANY  WARRANTY;  without even  the  implied  warranty  of MERCHANTABILITY  or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License
// for more details.
// 
// You should  have received a  copy of the  GNU Lesser General  Public License
// along  with this library.
// 


#define __U_KERNEL__
#define __U_PROFILE__
#define __U_PROFILEABLE_ONLY__


#include <uC++.h>
#include <uProfiler.h>
#include <uProcessor.h>
#include <uAssert.h>
//#include <uDebug.h>

#include <cstring>                              // strerror
#include <cstdio>                               // fprintf
#include <cerrno>
#include <unistd.h>                             // getpid
#include <inttypes.h>                           // uintptr_t

#if defined( __solaris__ )
#include <sys/lwp.h>
#endif // __solaris__

#if defined( __linux__ )
#include <sys/wait.h>                           // waitpid
#include <sched.h>
#if ! defined( CLONE_PARENT )                   // TEMPORARY: problems in include files for Linux 2.4
#define CLONE_PARENT    0x00008000              // set if we want to have the same parent as the cloner
#endif // ! CLONE_PARENT

#if defined( __ia64__ )
extern "C" int __clone2( int (*__fn) (void *__arg), void *__child_stack_base, size_t __child_stack_size, int __flags, void *__arg );
#define REGPARM

#elif defined( __i386__ )

#define REGPARM __attribute__(( regparm(2) ))

#endif

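// _dl_get_tls_static_info and _dl_allocate_tls are glibc-internal entry
// points (not part of the public API), used below to size and allocate the
// static TLS block when hand-building a thread descriptor for a cloned KT.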
extern "C" void *_dl_allocate_tls( void *mem ) REGPARM;
extern "C" void *_dl_get_tls_static_info( size_t *sizep, size_t *alignp ) REGPARM;

#endif // __linux__

#if defined( __irix__ )
#include <sys/wait.h>
#endif // __irix__

#if ! defined( __U_MULTI__ )
uEventList     *uProcessor::uEvents                   = NULL;
uEventNode     *uProcessor::uContextEvent             = NULL;
uCxtSwtchHndlr *uProcessor::uContextSwitchHandler     = NULL;
#if defined( __i386__ )
struct vperfctr *uProcessor::uPerfctrContext            = NULL;
#elif defined( __ia64__ ) && ! defined( __old_perfmon__ )
int              uProcessor::uPerfmon_fd                = 0;
#endif
#endif // ! __U_MULTI__

#ifdef __U_DEBUG__
static const int uMinPreemption = 1000;               // 1 second (milliseconds)
#endif // __U_DEBUG__


extern uProcessor &uThisProcessor() {
    return *THREAD_GETMEM( uActiveProcessor );
} // uThisProcessor


//######################### uProcessorDL #########################


uProcessorDL::uProcessorDL( uProcessor &w ) : uWho( w ) {}
uProcessor &uProcessorDL::uGet() const { return uWho; }


//######################### uProcessorTask #########################


void uProcessorTask::main() {
#ifdef __U_DEBUG_H__
    uDebugPrt( "(uProcessorTask &)0x%p.main, starting\n", this );
#endif // __U_DEBUG_H__
    uAssert( THREAD_GETMEM( uDisableInt ) && THREAD_GETMEM( uDisableIntCnt ) > 0 );

    // There is a race between creation of the KT and the start of the child.
    // Since the child needs this information first, set it here.

    uProc.uPid =                          // set LWP pid for processor
#if defined( __solaris__ ) && defined( __U_MULTI__ )
      _lwp_self();
#else
      getpid();
#endif

    uProc.uProcessorClock = &uActiveProcessorKernel->uKernelClock;

    // Although the signal handlers are inherited by each child process, the
    // alarm setting is not.

    uProc.uSetContextSwitchEvent( uProc.uGetPreemption() );

#if __U_LOCALDEBUGGER_H__
    if ( uLocalDebugger::uLocalDebuggerActive ) uLocalDebugger::uLocalDebuggerInstance->checkPoint();
#endif // __U_LOCALDEBUGGER_H__

#if __U_LOCALDEBUGGER_H__
    if ( uLocalDebugger::uLocalDebuggerActive ) uLocalDebugger::uLocalDebuggerInstance->createKernelThread( uProc, uThisCluster() );
#endif // __U_LOCALDEBUGGER_H__

    if ( uProfiler::uProfiler_BuiltInRegisterProcessor ) {
      (*uProfiler::uProfiler_BuiltInRegisterProcessor)( uProfiler::uProfilerInstance, uBound );
    } // if

    for ( ;; ) {
      uAssert( THREAD_GETMEM( uDisableInt ) && THREAD_GETMEM( uKernelModule::uDisableIntCnt ) > 0 );
      uAccept( ~uProcessorTask ) {
#ifdef __U_DEBUG_H__
          uDebugPrt( "(uProcessorTask &)0x%p.main, ~uProcessorTask\n", this );
#endif // __U_DEBUG_H__
          break;
      } uOr uAccept( uSetPreemption ) {
#ifdef __U_DEBUG_H__
          uDebugPrt( "(uProcessorTask &)0x%p.main, uSetPreemption( %d )\n", this, uPreemption );
#endif // __U_DEBUG_H__
          uProc.uSetContextSwitchEvent( uPreemption ); // use it to set the alarm for this processor
          uProc.uPreemption = uPreemption;
      } uOr uAccept( uSetCluster ) {
#ifdef __U_DEBUG_H__
          uDebugPrt( "(uProcessorTask &)0x%p.main, uSetCluster\n", this );
#endif // __U_DEBUG_H__

#if __U_LOCALDEBUGGER_H__
          if ( uLocalDebugger::uLocalDebuggerActive ) uLocalDebugger::uLocalDebuggerInstance->checkPoint();
#endif // __U_LOCALDEBUGGER_H__

          // Remove the processor from the list of processors that live on
          // this cluster, and add it to the list of processors that live on
          // the new cluster. The move has to be done by the processor itself
          // when it is in a stable state. As well, the processor's task has
          // to be moved to the new cluster but it does not have to be
          // migrated there since it is a bound task.

          // change processor's notion of which cluster it is executing on
          uCluster &uPrev = uProc.uGetCluster();

          if ( uProfiler::uProfiler_RegisterProcessorMigrate ) { // task registered for profiling ?              
            (*uProfiler::uProfiler_RegisterProcessorMigrate)( uProfiler::uProfilerInstance, uProc, uPrev, *uClus );
          } // if

          uPrev.uProcessorRemove( uProc );
          uProc.uCurrCluster = uClus;
          THREAD_SETMEM( uActiveCluster, uClus );
          uClus->uProcessorAdd( uProc );
          uCurrCluster = uClus;                 // change task's notion of which cluster it is executing on

#if __U_LOCALDEBUGGER_H__
          if ( uLocalDebugger::uLocalDebuggerActive ) uLocalDebugger::uLocalDebuggerInstance->migrateKernelThread( uProc, *uClus );
#endif // __U_LOCALDEBUGGER_H__

          uSignalBlock uResult;
#if defined( __U_MULTI__ )
#if defined( __irix__ )
      } uOr uAccept( uProcCreate ) {
          uAssert( &uProc == uKernelModule::uSystemProcessor );
#ifdef __U_DEBUG_H__
          uDebugPrt( "(uProcessorTask &)0x%p.main, uProcCreate uPid:%d\n", this, uPid );
#endif // __U_DEBUG_H__
          uPid = sprocsp( __U_START_KT_NAME__, PR_SADDR | PR_SFDS | PR_SUMASK, uCreateProc, (char *)(uCreateProc->uProcessorKer.uBase), uCreateProc->uProcessorKer.uSize );
          if ( uPid == -1 ) {
            uAbort( "(uProcessorTask &)0x%p.main, uProcCreate, internal error, error(%d) %s.", this, errno, strerror( errno ) );
          } // if
          uSignalBlock uResult;
#endif
#if defined( __irix__ ) ||  defined( __linux__ )
      } uOr uAccept( uProcWait ) {
          uAssert( &uProc == uKernelModule::uSystemProcessor );
          int code, status = 0;

          for ( ;; ) {
            code = ::waitpid( uPid, &status, 0 );     // wait for the specific child and its exit status
            if ( code != -1 ) break;
            if ( errno != EINTR ) break;        // timer interrupt ?
          } // for

#ifdef __U_DEBUG_H__
          uDebugPrt( "(uProcessorTask &)0x%p.main, uProcWait uPid:%d, pid:%d, status:0x%p\n", this, uPid, code, status );
#endif // __U_DEBUG_H__

          // uProcWait and uSigChldHandler race to wait on the pid for normal
          // termination so the loser ignores the ECHILD.
          if ( code == -1 && errno != ECHILD ) {
            uAbort( "(uProcessorTask &)0x%p.main, uProcWait, internal error, error(%d) %s.", this, errno, strerror( errno ) );
          } // if
          if ( code > 0 && WIFSIGNALED( status ) ) {        // process died as the result of some signal ?
            uKernelModule::uCoreDumped = true;        // assume that the child dumped core
            uAbort( ": child process %d died", uPid );
          } // if
          uSignalBlock uResult;
#endif
      } uOr uAccept( uProcExit ) {
          uAssert( &uProc == uKernelModule::uSystemProcessor );
          uAbortExit();                         // shut down other processors
          _exit( uRetCode );
#endif // __U_MULTI__
      } // uAccept
    } // for

    if ( uProfiler::uProfiler_BuiltInDeregisterProcessor ) {
      (*uProfiler::uProfiler_BuiltInDeregisterProcessor)( uProfiler::uProfilerInstance, uBound );
    } // if

#if __U_LOCALDEBUGGER_H__
    if ( uLocalDebugger::uLocalDebuggerActive ) uLocalDebugger::uLocalDebuggerInstance->destroyKernelThread( uProc );
#endif // __U_LOCALDEBUGGER_H__

    uActiveProcessorKernel->uTerminated = true;
} // uProcessorTask::main


void uProcessorTask::uSetPreemption( int ms ) {
    uPreemption = ms;
} // uProcessorTask::uSetPreemption


void uProcessorTask::uSetCluster( uCluster &clus ) {
    uClus = &clus;                              // copy arguments
    uWait uResult;                              // wait for result
} // uProcessorTask::uSetCluster


#ifdef __U_MULTI__
#if defined( __irix__ )
pid_t uProcessorTask::uProcCreate( uProcessor *np ) {
    uCreateProc = np;                           // copy arguments
    uWait uResult;                              // wait for result
    return uPid;
} // uProcessorTask::uProcCreate
#endif


#if defined( __irix__ ) ||  defined( __linux__ )
void uProcessorTask::uProcWait( pid_t pid ) {
    uPid = pid;                                 // copy arguments
    uWait uResult;                              // wait for result
} // uProcessorTask::uProcWait
#endif


void uProcessorTask::uProcExit( int retcode ) {
    uRetCode = retcode;                         // copy arguments
    uWait uResult;                              // wait for result
    uAbort( "(uProcessorTask &)0x%p.uProcExit() : internal error, CONTROL NEVER REACHES HERE!", this );
} // uProcessorTask::uProcExit
#endif // __U_MULTI__


uProcessorTask::uProcessorTask( uCluster &uClus, uProcessor &uProc ) : uBaseTask( uClus, uProc ), uProc( uProc ) {
} // uProcessorTask::uProcessorTask


uProcessorTask::~uProcessorTask() {
#ifdef __U_MULTI__
    // do not wait for uSystemProcessor KT as it must return to the shell
  if ( &uProc == uKernelModule::uSystemProcessor ) return;

    pid_t pid = uProc.uGetPid();                // pid of underlying KT
#if defined( __solaris__ )
    int code;
    for ( ;; ) {
      code = _lwp_wait( pid, NULL );                  // wait for termination of KT
      if ( code == 0 ) break;
      if ( code != EINTR ) break;               // timer interrupt ?
    } // for
    if ( code != 0 ) {
      uAbort( "(uProcessor &)0x%p.~uProcessor() : internal error, wait failed for kernel thread %ld, error(%d) %s.",
            this, (long int)pid, code, strerror( code ) );
    } // if

#elif defined( __linux__ )
    // Linux clone can only wait for a child not a sibling KT.  However, by
    // judicious use of CLONE_PARENT all KTs can be made to have the SKT as
    // their parent.  Therefore, only the SKT can perform the waitpid by
    // calling its processor task to guarantee the operation is performed on
    // the SKT.

    uKernelModule::uSystemProcessor->uProcWait( pid );

#elif defined( __irix__ )
    uKernelModule::uSystemProcessor->uProcWait( pid );
#else
    #error uC++ internal error : unsupported architecture
#endif

#endif // __U_MULTI__
} // uProcessorTask::~uProcessorTask


//######################### uProcessorKernel #########################


__U_START_KT_TYPE__ {
#if defined (__U_MULTI__)
    volatile uKernelModule *km;

#if defined( __irix__ )
    // the user part of the PRDA holds the kernel module
    km = (uKernelModule *)&(PRDA->usr_prda);
    km->ctor();

#elif defined( __linux__ ) && defined( __i386__ )
    /*
            +------------------+
            |                  |
            |  struct pthread  |
            |                  |
            +------------------+ <== gs:0 (thread pointer)
            |                  |
            |       TLS        | <== uKernelModuleBoot in here somewhere
            |                  |
            +------------------+ <== must be aligned to tls_align
            |                  |
            | stack continues  |
            .                  .
            .                  .
            .                  .
    */
#if defined( __U_TLS__ )
#define SIZEOF_STRUCT_PTHREAD 3*1024
    // fake the beginning of struct pthread, since glibc makes assumptions about thread ptr
    struct fake_pthread {
      union {
          struct {
            void *tcb;                                /* TCB pointer: not necessarily the thread descriptor used by libpthread */
            void *dtv;
            void *self;                               /* thread descriptor pointer */
            int multiple_threads;
            uintptr_t sysinfo;
          } tcbhead;
          void *__padding[16];
      };
      void *list_next;
      void *list_prev;
      pid_t tid;
      pid_t pid;
    } *descr;

    size_t tls_size, tls_align, adj;
    void *block, *tls_ptr;
    _dl_get_tls_static_info( &tls_size, &tls_align );
    block = alloca( SIZEOF_STRUCT_PTHREAD + tls_size );
    // block may not be sufficiently aligned
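    // The stack grows downward, so alloca'ing the misalignment amount extends
    // the reservation far enough that rounding block down to a tls_align
    // boundary still points at freshly reserved stack space.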
    adj = ((unsigned long) block) & ( tls_align - 1 );
    if ( adj ) {
        alloca( adj );
    } // if
    tls_ptr = (void*)(((unsigned long)block) & ~( tls_align - 1 ));
    descr = (struct fake_pthread*)((unsigned long)tls_ptr + tls_size);
    memset( descr, 0, sizeof( struct fake_pthread ) );       // zero the fake thread descriptor
    asm ("movl %%gs:(%1), %0" : "=q" (descr->tcbhead.sysinfo) : "r" (offsetof( struct fake_pthread, tcbhead.sysinfo )));
    descr->tcbhead.tcb = descr; 
    descr->tcbhead.self = descr;
    asm ("leal _ZN13uKernelModule17uKernelModuleBootE@ntpoff(%1),%0" : "=r" (km) : "r" (descr));
    km->ldtValue = ((uProcessor *)p)->ldtValue;
    km->threadPointer = (unsigned long)descr;
    km->ctor();
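    // _dl_allocate_tls calls calloc, which requires a valid kernel module;
    // however, it also reinitializes the TLS block, wiping the fields just
    // set, so they are restored and the constructor is run a second time
    // (see the matching comment in the ia64 code below).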
    _dl_allocate_tls( descr );
    km->ldtValue = ((uProcessor *)p)->ldtValue;
    km->threadPointer = (unsigned long)descr;
    km->ctor();
#else
    uKernelModule theKernelModule;
    km = &theKernelModule;
    km->ldtValue = ((uProcessor *)p)->ldtValue;
    km->threadPointer = (unsigned long)km;
    km->ctor();
#endif // __U_TLS__

#elif defined( __linux__ ) && defined( __ia64__ )
#if defined( __U_TLS__ )
    /*
            +------------------+
            |                  |
            |       TLS        | <== uKernelModuleBoot in here somewhere
            |                  |
            +------------------+ <== must be aligned to tls_align
            |     16 bytes     |
            +------------------+ <== r13 (thread pointer)
            |                  |
            |  struct pthread  |
            |                  |
            +------------------+
            |                  |
            | stack continues  |
            .                  .
            .                  .
            .                  .
    */
#define SIZEOF_STRUCT_PTHREAD 3*1024
    size_t tls_size, tls_align;
    void *block;
    uintptr_t tls_ptr, new_thread_ptr;
    
    _dl_get_tls_static_info( &tls_size, &tls_align );
    block = alloca( SIZEOF_STRUCT_PTHREAD + tls_size + (tls_align-1) );
    tls_ptr = ( (uintptr_t)block + SIZEOF_STRUCT_PTHREAD + tls_align - 1 ) & ~( tls_align - 1 );
    new_thread_ptr = tls_ptr - 16;
    asm volatile ( "add r15 = 8, r13;;\n"
                   "ld8 r15 = [r15];;\n"
               "st8 [%0] = r15" : : "r" ((unsigned long*)new_thread_ptr + 1) : "r15" ); // copy the magic "sysinfo" value
    /* Register r13 (tp) is reserved by the ABI as "thread pointer". */
    asm volatile ("mov r13=%0;;\n" : : "ir" (new_thread_ptr) );
    // TEMPORARY: we should be able to say
    //    km = &uKernelModule::uKernelModuleBoot;
    // but a gcc bug prevents this; hence the following:
    asm volatile ("addl %0 = @ltoff(@tprel(_ZN13uKernelModule17uKernelModuleBootE#)), gp;;\n"
                  "ld8 %0 = [%0];;\n"
                  "add %0 = %0, r13;;\n" : "=r" (km) );

    // we need a valid kernel module in order to do _dl_allocate_tls, since it calls calloc
    // however, it also initializes tls, which deinitializes the kernel module
    km->ctor();
    _dl_allocate_tls( (void*)new_thread_ptr );
    km->ctor();
#else
    uKernelModule theKernelModule;
    km = &theKernelModule;
    km->ctor();
#endif // __U_TLS__

#else
    // allocate a kernel module at the bottom of the thread's stack
    uKernelModule theKernelModule;
    km = &theKernelModule;
    km->ctor();
#endif

    // NO DEBUG PRINTS BEFORE THE THREAD REFERENCE IS SET IN CTOR.

#ifdef __U_DEBUG_H__
    uDebugPrt( "startthread, child started\n" );
#endif // __U_DEBUG_H__

    uProcessor &uProc = *(uProcessor *)p;
    uProcessorKernel *pk = &uProc.uProcessorKer;

    km->uProcessorKernelStorage = pk;

    // initialize thread members

    THREAD_SETMEM( uActiveProcessor, &uProc );
    THREAD_SETMEM( uActiveCluster, THREAD_GETMEM( uActiveProcessor )->uCurrCluster );
    
    uAssert( THREAD_GETMEM( uDisableInt ) && THREAD_GETMEM( uDisableIntCnt ) == 1 );

    THREAD_GETMEM( uSelf )->uDisableInterrupts();
    uMachContext::uInvokeCoroutine( *uActiveProcessorKernel );
#endif // __U_MULTI__

#if defined( __linux__ )
    // this line is never reached, but linux allows the start-thread function to
    // return a value, and gcc warns otherwise.
    return 0;
#endif // linux
} // __U_START_KT_TYPE__


inline void uProcessorKernel::taskIsBlocking() {
    uBaseTask &t = uThisTask();                       // optimization
    if ( t.uGetState() != uBaseTask::uTerminate ) {
      t.uSetState( uBaseTask::uBlocked );
    } // if
} // uProcessorKernel::taskIsBlocking


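// Each uSchedule variant encodes a post-switch action in uCode: 0 => nothing,
// 1 => release a spin lock, 2 => wake a task, 3 => release a lock and wake a
// task. uOnBehalfOfUser performs the action on the kernel stack after the
// context switch, when the blocking task's stack is no longer in use.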
void uProcessorKernel::uSchedule() {
    uAssert( ! uThisTask().uReadyRef.uListed() );
    uAssert( ! THREAD_GETMEM( uDisableIntSpin ) );

    taskIsBlocking();

    uCode = 0;
    uContextSw();                         // not uResume because entering kernel
} // uProcessorKernel::uSchedule


void uProcessorKernel::uSchedule( uSpinLock *l ) {
    uAssert( ! uThisTask().uReadyRef.uListed() );
    uAssert( THREAD_GETMEM( uDisableIntSpinCnt ) == 1 );

    taskIsBlocking();

    uCode = 1;
    uPrevLock = l;
    uContextSw();                         // not uResume because entering kernel
} // uProcessorKernel::uSchedule


void uProcessorKernel::uSchedule( uBaseTask *t ) {
    // SKULLDUGGERY: uBootTask is on ready queue for first entry into the kernel.
    uAssert( &uThisTask() != (uBaseTask *)uKernelModule::uTaskBoot ? ! uThisTask().uReadyRef.uListed() : true );
    uAssert( ! THREAD_GETMEM( uDisableIntSpin ) );

    if ( t != &uThisTask() ) {
      taskIsBlocking();
    } // if

    uCode = 2;
    uNextTask = t;
    uContextSw();                         // not uResume because entering kernel
} // uProcessorKernel::uSchedule


void uProcessorKernel::uSchedule( uSpinLock *l, uBaseTask *t ) {
    uAssert( ! uThisTask().uReadyRef.uListed() );
    uAssert( THREAD_GETMEM( uDisableIntSpinCnt ) == 1 );

    taskIsBlocking();

    uCode = 3;
    uPrevLock = l;
    uNextTask = t;
    uContextSw();                         // not uResume because entering kernel
} // uProcessorKernel::uSchedule


void uProcessorKernel::uOnBehalfOfUser() {
    switch( uCode ) {
      case 0:
          break;
      case 1:
          uPrevLock->release();
          break;
      case 2:
          uNextTask->uWake();
          break;
      case 3:
          uPrevLock->release();
          uNextTask->uWake();
          break;
      default:
          uAbort( "(uProcessorKernel &)0x%p.uOnBehalfOfUser : internal error, uCode:%d.", this, uCode );
          break;
    } // switch
} // uProcessorKernel::uOnBehalfOfUser


void uProcessorKernel::uSetTimer( uDuration dur ) {
  if ( dur < 0 ) return;                        // if duration is negative, it's invalid

    // For now, write only code for the non-POSIX timer. When the POSIX timer
    // is available, use timer_create and timer_settime.

    timeval conv = dur;
    itimerval it;
    it.it_value = conv;                         // fill in the value to the next expiry
    it.it_interval.tv_sec = 0;                        // not periodic
    it.it_interval.tv_usec = 0;
#ifdef __U_DEBUG_H__
    char buffer[256];
    uDebugPrtBuf( buffer, "uProcessorKernel::uSetTimer, it.it_value.tv_sec:%ld, it.it_value.tv_usec:%ld\n", it.it_value.tv_sec, it.it_value.tv_usec );
#endif // __U_DEBUG_H__
    setitimer( ITIMER_REAL, &it, NULL );        // set the alarm clock to go off
} // uProcessorKernel::uSetTimer


void uProcessorKernel::uSetTimer( uTime time ) {
  if ( time == 0 ) return;                      // if time is zero, it's invalid

    // Since this is private, and only uEventList is a friend, the assumption
    // is made that the time parameter is always in real-time (not virtual
    // time)

#if defined( REALTIME_POSIX )
    timespec curr;
    clock_gettime( CLOCK_REALTIME, &curr );     // per the comment above, time is always real-time
    uTime currtime( curr.tv_sec, curr.tv_nsec ); // already in nanoseconds
#else
    timeval curr;
    GETTIMEOFDAY( &curr );
    uTime currtime( curr.tv_sec, curr.tv_usec * 1000 );     // convert microseconds to nanoseconds
#endif

    uDuration dur = time - currtime;
    if ( dur <= 0 ) {                           // if duration is non-positive, the time has already passed
      // fill in the value to the next expiry by setting alarm to soonest
      // time an alarm may come
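      // (TIMEGRAN / 1000000L is one microsecond, assuming TIMEGRAN counts
      // time-grains, i.e., nanoseconds, per second).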
      uSetTimer( uDuration( 0, TIMEGRAN / 1000000L ) );
    } else {
      uSetTimer( dur );
    } // if
} // uProcessorKernel::uSetTimer


void uProcessorKernel::nextProcessor( uProcessorDL *&uCurrProc ) {
    // Get next processor to execute.

    unsigned int uPrevPreemption = uThisProcessor().uGetPreemption(); // remember previous preemption value
#ifdef __U_DEBUG_H__
    uDebugPrt( "(uProcessorKernel &)0x%p.nextProcessor, from processor 0x%p on cluster %.256s (0x%p) with time slice %d\n",
             this, &uThisProcessor(), uThisProcessor().uCurrCluster->uGetName(), uThisProcessor().uCurrCluster, uThisProcessor().uGetPreemption() );
#endif // __U_DEBUG_H__
    uCurrProc = uKernelModule::uGlobalProcessors->uSucc( uCurrProc );
    if ( uCurrProc == NULL ) {                        // make list appear circular
      uCurrProc = uKernelModule::uGlobalProcessors->uHead(); // restart at beginning of list
    } // if
    THREAD_SETMEM( uActiveProcessor, &(uCurrProc->uGet() ) );
    THREAD_SETMEM( uActiveCluster, THREAD_GETMEM( uActiveProcessor )->uCurrCluster );
#ifdef __U_DEBUG_H__
    uDebugPrt( "(uProcessorKernel &)0x%p.nextProcessor, to processor 0x%p on cluster %.256s (0x%p) with time slice %d\n",
             this, &uThisProcessor(), uThisProcessor().uCurrCluster->uGetName(), uThisProcessor().uCurrCluster, uThisProcessor().uGetPreemption() );
#endif // __U_DEBUG_H__

    // The time slice must be reset whenever the next processor's preemption
    // interval differs from the previous one; otherwise the context-switch
    // alarm fires at the wrong rate and some programs will not work.

    if ( uThisProcessor().uGetPreemption() != uPrevPreemption ) {
      uThisProcessor().uSetContextSwitchEvent( uThisProcessor().uGetPreemption() );
    } // if
} // uProcessorKernel::nextProcessor


void uProcessorKernel::main() {
#ifdef __U_DEBUG_H__
    uDebugPrt( "(uProcessorKernel &)0x%p.main, child is born\n", this );
#endif // __U_DEBUG_H__

#if ! defined( __U_MULTI__ )
    // SKULLDUGGERY: The system processor is not on the global list until the
    // processor task runs, so explicitly set the current processor to the
    // system processor.

    uProcessorDL *uCurrProc = &(uKernelModule::uSystemProcessor->uGlobalRef);
    uProcessorDL *uCycleStart = NULL;
    bool &uOkToSelect = uCluster::NBIO->uOkToSelect;
    uBaseTask *&uIOPoller = uCluster::NBIO->uIOPoller;
#endif // ! __U_MULTI__

    for ( unsigned int spin = 0;; ) {
      // Advance the spin counter now to detect if a task is executed.

      spin += 1;

      if ( ! uThisProcessor().uExternal.uEmpty() ) {  // check processor specific ready queue
          // Only this processor removes from this ready queue so no other
          // processor can remove this task after it has been seen.

          THREAD_SETMEM( uActiveTask, uThisProcessor().uCurrTask = &(uThisProcessor().uExternal.uDropHead()->uGet()));

#ifdef __U_DEBUG_H__
          uDebugPrt( "(uProcessorKernel &)0x%p.main, scheduling(1) bef: task %.256s (0x%p) (limit:0x%p,base:0x%p,stack:0x%p) from cluster:%.256s (0x%p) on processor:0x%p, %d,%d,%d,%d,%d\n",
                   this, uThisProcessor().uCurrTask->uCurrCoroutine->uName, uThisProcessor().uCurrTask->uCurrCoroutine,
                   uThisProcessor().uCurrTask->uLimit, uThisProcessor().uCurrTask->uBase, uThisProcessor().uCurrTask->uStackPointer(),
                   uThisProcessor().uCurrCluster->uGetName(), uThisProcessor().uCurrCluster, &uThisProcessor(),
                   THREAD_GETMEM( uDisableInt ),
                   THREAD_GETMEM( uDisableIntCnt ),
                   THREAD_GETMEM( uDisableIntSpin ),
                   THREAD_GETMEM( uDisableIntSpinCnt ),
                   THREAD_GETMEM( uInKernelRF )
            );
#endif // __U_DEBUG_H__

          // SKULLDUGGERY: The processor task is part of the kernel, and
          // therefore, must execute as uninterruptible code. By incrementing
          // the interrupt counter here, the decrement when the processor
          // task is scheduled leaves the processor task's execution in the
          // kernel with regard to interrupts. This assumes the uDisableInt
          // flag is set already.

          uAssert( THREAD_GETMEM( uDisableInt ) && THREAD_GETMEM( uKernelModule::uDisableIntCnt ) > 0 );
          THREAD_GETMEM( uSelf )->uDisableInterrupts();

#if defined( __U_MULTI__ ) && defined( __U_SWAPCONTEXT__ )
#if defined( __i386__ )
          ((ucontext_t *)(uThisProcessor().uCurrTask->uCurrCoroutine->uStorage))->uc_mcontext.gregs[REG_GS] = THREAD_GETMEM( ldtValue );    
#elif defined( __ia64__ )
          ((ucontext_t *)(uThisProcessor().uCurrTask->uCurrCoroutine->uStorage))->uc_mcontext.sc_gr[13] = (unsigned long)THREAD_GETMEM( threadPointer );
#elif defined( __sparc__ )
          ((ucontext_t *)(uThisProcessor().uCurrTask->uCurrCoroutine->uStorage))->uc_mcontext.gregs[REG_G7] = (int)(THREAD_GETMEM( uSelf ) ); 
#endif
#endif // __U_MULTI__ && __U_SWAPCONTEXT__

          uSwitch( uStorage, uThisProcessor().uCurrTask->uCurrCoroutine->uStorage );
          THREAD_GETMEM( uSelf )->uEnableInterrupts();
          uAssert( THREAD_GETMEM( uDisableInt ) && THREAD_GETMEM( uDisableIntCnt ) > 0 );

#ifdef __U_DEBUG_H__
          uDebugPrt( "(uProcessorKernel &)0x%p.main, scheduling(1) aft: task %.256s (0x%p) (limit:0x%p,base:0x%p,stack:0x%p) from cluster:%.256s (0x%p) on processor:0x%p, %d,%d,%d,%d,%d\n",
                   this, uThisProcessor().uCurrTask->uCurrCoroutine->uName, uThisProcessor().uCurrTask->uCurrCoroutine,
                   uThisProcessor().uCurrTask->uLimit, uThisProcessor().uCurrTask->uBase, uThisProcessor().uCurrTask->uStackPointer(),
                   uThisProcessor().uCurrCluster->uGetName(), uThisProcessor().uCurrCluster, &uThisProcessor(),
                   THREAD_GETMEM( uDisableInt ),
                   THREAD_GETMEM( uDisableIntCnt ),
                   THREAD_GETMEM( uDisableIntSpin ),
                   THREAD_GETMEM( uDisableIntSpinCnt ),
                   THREAD_GETMEM( uInKernelRF )
            );
#endif // __U_DEBUG_H__

          spin = 0;                             // set number of spins back to zero
          uOnBehalfOfUser();                    // execute code on scheduler stack on behalf of user

          if ( uTerminated ) {
#ifdef __U_MULTI__
            if ( &uThisProcessor() != uKernelModule::uSystemProcessor ) break;
            // If control reaches here, the boot task must be the only task
            // on the system-cluster ready-queue, and it must be restarted
            // to finish the close down.
#else
#ifdef __U_DEBUG_H__
            uDebugPrt( "(uProcessorKernel &)0x%p.main termination, uCurrProc:0x%p, uThisProcessor:0x%p\n",
                     this, &(uCurrProc->uGet()), &uThisProcessor() );
#endif // __U_DEBUG_H__
            // In the uniprocessor case, only terminate the processor kernel
            // when the processor task for the system processor is deleted;
            // otherwise the program stops when the first processor is deleted.

            if ( &uThisProcessor() != uKernelModule::uSystemProcessor ) {
                uTerminated = false;

                // Get next processor to execute because the current one
                // just terminated.

                nextProcessor( uCurrProc );
            } // if
#endif // __U_MULTI__
          } // if
      } // if

      uBaseTask *uReadyTask = &(uThisProcessor().uCurrCluster->uReadyTaskTryRemove());
      if ( uReadyTask != NULL ) {               // ready queue not empty, schedule that task
          uAssert( ! uReadyTask->uReadyRef.uListed() );
          THREAD_SETMEM( uActiveTask, uThisProcessor().uCurrTask = uReadyTask );

#ifdef __U_DEBUG_H__
          uDebugPrt( "(uProcessorKernel &)0x%p.main, scheduling(2) bef: task %.256s (0x%p) (limit:0x%p,base:0x%p,stack:0x%p) from cluster:%.256s (0x%p) on processor:0x%p, %d,%d,%d,%d,%d\n",
                   this, uReadyTask->uCurrCoroutine->uName, uReadyTask->uCurrCoroutine,
                   uReadyTask->uCurrCoroutine->uLimit, uReadyTask->uCurrCoroutine->uBase, uReadyTask->uCurrCoroutine->uStackPointer(),
                   uThisProcessor().uCurrCluster->uGetName(), uThisProcessor().uCurrCluster, &uThisProcessor(),
                   THREAD_GETMEM( uDisableInt ),
                   THREAD_GETMEM( uDisableIntCnt ),
                   THREAD_GETMEM( uDisableIntSpin ),
                   THREAD_GETMEM( uDisableIntSpinCnt ),
                   THREAD_GETMEM( uInKernelRF )
            );
#endif // __U_DEBUG_H__

          uAssert( THREAD_GETMEM( uDisableInt ) && THREAD_GETMEM( uDisableIntCnt ) > 0 );

#if defined( __U_MULTI__ ) && defined( __U_SWAPCONTEXT__ )
#if defined( __i386__ )
          ((ucontext_t *)(uReadyTask->uCurrCoroutine->uStorage))->uc_mcontext.gregs[REG_GS] = THREAD_GETMEM( ldtValue );    
#elif defined( __ia64__ )
          ((ucontext_t *)(uReadyTask->uCurrCoroutine->uStorage))->uc_mcontext.sc_gr[13] = THREAD_GETMEM( threadPointer );
#elif defined( __sparc__ )
          ((ucontext_t *)(uReadyTask->uCurrCoroutine->uStorage))->uc_mcontext.gregs[REG_G7] = (int)(THREAD_GETMEM( uSelf ) ); 
#endif
#endif // __U_MULTI__ && __U_SWAPCONTEXT__

          uSwitch( uStorage, uReadyTask->uCurrCoroutine->uStorage );
          uAssert( THREAD_GETMEM( uDisableInt ) && THREAD_GETMEM( uDisableIntCnt ) > 0 );

          // SKULLDUGGERY: During spinning, the uActiveTask should not point
          // to the last task executed. Otherwise, the profiler incorrectly
          // displays the last task executed during spinning. So uActiveTask
          // is set to the uProcessorTask for this processor.
          THREAD_SETMEM( uActiveTask, uThisProcessor().uCurrTask = uThisProcessor().uProcTask );

#ifdef __U_DEBUG_H__
          uDebugPrt( "(uProcessorKernel &)0x%p.main, scheduling(2) aft: task %.256s (0x%p) (limit:0x%p,base:0x%p,stack:0x%p) from cluster:%.256s (0x%p) on processor:0x%p, %d,%d,%d,%d,%d\n",
                   this, uReadyTask->uCurrCoroutine->uName, uReadyTask->uCurrCoroutine,
                   uReadyTask->uLimit, uReadyTask->uBase, uReadyTask->uStackPointer(),
                   uThisProcessor().uCurrCluster->uGetName(), uThisProcessor().uCurrCluster, &uThisProcessor(),
                   THREAD_GETMEM( uDisableInt ),
                   THREAD_GETMEM( uDisableIntCnt ),
                   THREAD_GETMEM( uDisableIntSpin ),
                   THREAD_GETMEM( uDisableIntSpinCnt ),
                   THREAD_GETMEM( uInKernelRF )
            );
#endif // __U_DEBUG_H__

#ifdef __U_MULTI__
          spin = 0;                             // set number of spins back to zero
#else
          // Poller task does not count as an executed task, if its last
          // execution found no I/O and this processor's ready queue is
          // empty. Check before calling uOnBehalfOfUser, because uIOPoller
          // may put itself back on the ready queue, which makes the ready
          // queue appear non-empty.

          if ( uReadyTask != uIOPoller || uCluster::NBIO->uFound != 0 || ! uThisProcessor().uCurrCluster->uReadyTasksEmpty() ) {
            spin = 0;                     // set number of spins back to zero
          } // if
#endif // __U_MULTI__
          uOnBehalfOfUser();                    // execute code on scheduler stack on behalf of user
      } // if

#ifdef __U_MULTI__
      if ( spin > uThisProcessor().uGetSpin() ) {
          // SKULLDUGGERY: During pausing, the processor's uCurrTask should
          // not point to the last task executed. Otherwise, the visualizer
          // incorrectly displays the last task executed during pausing. So
          // uCurrTask is set to the uProcessorTask for this processor.

#ifdef __U_DEBUG_H__
          uDebugPrt( "(uProcessorKernel &)0x%p.main, on processor 0x%p nothing to do\n", this, &uThisProcessor() );
#endif // __U_DEBUG_H__
          THREAD_SETMEM( uActiveTask, uThisProcessor().uCurrTask = uThisProcessor().uProcTask );
          uThisProcessor().uCurrCluster->uProcessorPause(); // put processor to sleep
          spin = 0;                             // set number of spins back to zero
      } // if
#else
      // A cycle starts when a processor executes no tasks. If the cycle
      // completes and no task has executed, deadlock has occurred unless
      // there are pending I/O tasks. If there are pending I/O tasks, the I/O
      // poller task pauses the UNIX process at the "select".

#ifdef __U_DEBUG_H__
      uDebugPrt( "(uProcessorKernel &)0x%p.main, uCycleStart:0x%p, uCurrProc:%.256s (0x%p), spin:%d, uReadyTask:0x%p, uIOPoller:0x%p\n",
              this, uCycleStart, uCurrProc->uGet().uCurrCluster->uGetName(), uCurrProc, spin, uReadyTask, uIOPoller );
#endif // __U_DEBUG_H__

      if ( uCycleStart == uCurrProc && spin != 0 ) {
#if __U_LOCALDEBUGGER_H__
#ifdef __U_DEBUG_H__
          uDebugPrt( "(uProcessorKernel &)0x%p.main, uLocalDebuggerInstance:0x%p, uIOPoller: %.256s (0x%p), dispatcher:0x%p, debugger_blocked_tasks:%d, uPendingIO.uHead:0x%p, uPendingIO.uTail:0x%p\n",
                   this, uLocalDebugger::uLocalDebuggerInstance,
                   uIOPoller != NULL ? uIOPoller->uGetName() : "no I/O poller task", uIOPoller,
                   uLocalDebugger::uLocalDebuggerInstance != NULL ? uLocalDebugger::uLocalDebuggerInstance->dispatcher : NULL,
                   uLocalDebugger::uLocalDebuggerInstance != NULL ? uLocalDebugger::uLocalDebuggerInstance->debugger_blocked_tasks : 0,
                   uCluster::NBIO->uPendingIO.uHead(), uCluster::NBIO->uPendingIO.uTail() );
#endif // __U_DEBUG_H__
#endif // __U_LOCALDEBUGGER_H__
          if ( uIOPoller != NULL                // I/O poller task ?
#if __U_LOCALDEBUGGER_H__
            && (
                uLocalDebugger::uLocalDebuggerInstance == NULL || // local debugger initialized ?
                uIOPoller != (uBaseTask *)uLocalDebugger::uLocalDebuggerInstance->dispatcher || // I/O poller the local debugger reader ?
                uLocalDebugger::uLocalDebuggerInstance->debugger_blocked_tasks != 0 || // any tasks debugger blocked ?
                uCluster::NBIO->uPendingIO.uHead() != uCluster::NBIO->uPendingIO.uTail() // any other tasks waiting for I/O ?
                )
#endif // __U_LOCALDEBUGGER_H__
            ) {
#ifdef __U_DEBUG_H__
            uDebugPrt( "(uProcessorKernel &)0x%p.main, poller blocking\n", this );
#endif // __U_DEBUG_H__

            uOkToSelect = true;                 // tell poller it is ok to call UNIX select, reset in uPollIO
          } else if ( uThisProcessor().uEvents->uUserEventPresent() ) {  // tasks sleeping, except system task  ?
#ifdef __U_DEBUG_H__
            uDebugPrt( "(uProcessorKernel &)0x%p.main, sleeping with pending events\n", this );
#endif // __U_DEBUG_H__
            uThisProcessor().uCurrCluster->uProcessorPause(); // put processor to sleep
          } else {
            fprintf( stderr, "Clusters and tasks present at deadlock:\n" );
            uSeqGen<uClusterDL> ci;
            uClusterDL *cr;
            for ( ci.uOver( *uKernelModule::uGlobalClusters ); ci >> cr; ) {
                uCluster *cluster = &cr->uGet();
                fprintf( stderr, "%.256s (0x%p)\n", cluster->uGetName(), cluster );

                fprintf( stderr, "\ttasks:\n" );
                uBaseTaskDL *bt;
                for ( uSeqGen<uBaseTaskDL> tgen( cluster->uTasksOnCluster ); tgen >> bt; ) {
                  uBaseTask *task = &bt->uGet();
                  fprintf( stderr, "\t\t %.256s (0x%p)\n", task->uGetName(), task );
                } // for
            } // for
            uAbort( ": no ready or pending tasks.\n"
                  "Possible cause is tasks are in a synchronization or mutual exclusion deadlock." );
          } // if
      } // if

      if ( spin == 0 ) {                        // task executed ?
          uCycleStart = uCurrProc;              // mark new start for cycle
      } // if

      // Get next processor to execute.

      nextProcessor( uCurrProc );
#endif // __U_MULTI__
    } // for

    // ACCESS NO KERNEL DATA STRUCTURES AFTER THIS POINT BECAUSE THEY MAY NO
    // LONGER EXIST.

#ifdef __U_DEBUG_H__
    uDebugPrt( "(uProcessorKernel &)0x%p.main, exiting\n", this );
#endif // __U_DEBUG_H__

    uThisCluster().uMakeProcessorActive();

    THREAD_GETMEM( uSelf )->dtor();
#if defined( __solaris__ )
    _lwp_exit();                          // exit without calling the global destructors.
#else
    _exit( 0 );                                 // exit without calling the global destructors.
#endif // __solaris__
} // uProcessorKernel::main


uProcessorKernel::uProcessorKernel() {
    uTerminated = false;
} // uProcessorKernel::uProcessorKernel

uProcessorKernel::~uProcessorKernel() {
#ifdef __U_DEBUG_H__
    uDebugPrt( "(uProcessorKernel &)0x%p.~uProcessorKernel, exiting\n", this );
#endif // __U_DEBUG_H__
} // uProcessorKernel::~uProcessorKernel


void *uProcessorKernel::operator new( size_t, void *storage ) {
    return storage;
} // uProcessorKernel::operator new

void *uProcessorKernel::operator new( size_t size ) {
    return ::operator new( size );
} // uProcessorKernel::operator new


//######################### uProcessor #########################


void *uProcessor::operator new( size_t, void *storage ) {
    return storage;
} // uProcessor::operator new

void *uProcessor::operator new( size_t size ) {
    return ::operator new( size );
} // uProcessor::operator new


void uProcessor::uCreateProcessor( uCluster &uClus, int ms, int spin ) {
#ifdef __U_DEBUG_H__
    uDebugPrt( "(uProcessor &)0x%p.uCreateProcessor, on cluster %.256s (0x%p)\n", this, uClus.uGetName(), &uClus );
#endif // __U_DEBUG_H__

#ifdef __U_DEBUG__
    if ( ms == 0 ) {                            // 0 => infinity; reset to the minimum preemption for the local debugger
      ms = uMinPreemption;                      // approximate infinity
    } else if ( ms < 0 ) {                      // special case for testing, not for public consumption
      ms = 0;
    } // if
#ifdef __U_MULTI__
    uDebugIgnore = false;
#else
    uDebugIgnore = true;
#endif // __U_MULTI__
#endif // __U_DEBUG__

    uCurrTask = NULL;
    uCurrCluster = &uClus;
    uPreemption = ms;
    uSpin = spin;

#ifdef __U_MULTI__
    uEvents = new uEventList;
    uContextSwitchHandler = new uCxtSwtchHndlr;
    uContextEvent = new uEventNode( *uContextSwitchHandler );
#if defined( __i386__ )
    uPerfctrContext = NULL;
#elif defined( __ia64__ ) && ! defined( __old_perfmon__ )
    uPerfmon_fd = 0;
#endif
#endif // __U_MULTI__
    uCurrCluster->uProcessorAdd( *this );

    uKernelModule::uGlobalProcessorLock->acquire();   // add processor to global processor list.
    uKernelModule::uGlobalProcessors->uAddTail( &(uGlobalRef) );
    uKernelModule::uGlobalProcessorLock->release();

    uProcTask = new uProcessorTask( uClus, *this );
    
#if defined( __i386__ ) && defined( __U_MULTI__ )
    ldtValue = uAllocLDT();
#endif // __i386__ && __U_MULTI__
} // uProcessor::uCreateProcessor


uProcessor::uProcessor( uCluster &uClus, double ) : uIdleRef( *this ), uProcessorRef( *this ), uGlobalRef( *this ) {
    uCreateProcessor( uClus, uDefaultPreemption(), uDefaultSpin() );
} // uProcessor::uProcessor


uProcessor::uProcessor( unsigned int ms, unsigned int spin ) : uIdleRef( *this ), uProcessorRef( *this ), uGlobalRef( *this ) {
    uCreateProcessor( uThisCluster(), ms, spin );
    uThisProcessor().uFork( this );             // processor executing this declaration forks the UNIX process

    if ( uProfiler::uProfiler_RegisterProcessor ) {
      (*uProfiler::uProfiler_RegisterProcessor)( uProfiler::uProfilerInstance, *this );
    } // if
} // uProcessor::uProcessor


uProcessor::uProcessor( uCluster &uClus, unsigned int ms, unsigned int spin ) : uIdleRef( *this ), uProcessorRef( *this ), uGlobalRef( *this ) {
    uCreateProcessor( uClus, ms, spin );
    uThisProcessor().uFork( this );             // processor executing this declaration forks the UNIX process

    if ( uProfiler::uProfiler_RegisterProcessor ) {
      (*uProfiler::uProfiler_RegisterProcessor)( uProfiler::uProfilerInstance, *this );
    } // if
} // uProcessor::uProcessor


uProcessor::~uProcessor() {
#ifdef __U_DEBUG_H__
    uDebugPrt( "(uProcessor &)0x%p.~uProcessor\n", this );
#endif // __U_DEBUG_H__
    if ( uProfiler::uProfiler_DeregisterProcessor ) {
      (*uProfiler::uProfiler_DeregisterProcessor)( uProfiler::uProfilerInstance, *this );
    } // if

    delete uProcTask;

    // Remove processor from global processor list. It is removed here because
    // the next action after this is the termination of the UNIX process in
    // uProcessorKernel. Therefore, even if the application is aborted and the
    // process is not on the list of UNIX processes for this application, the
    // current UNIX process will terminate itself.  It cannot be removed in
    // uProcessorKernel because the uProcessor storage may be freed already by
    // another processor.

    uKernelModule::uGlobalProcessorLock->acquire();   // remove processor from global processor list.
    uKernelModule::uGlobalProcessors->uRemove( &(uGlobalRef) );
    uKernelModule::uGlobalProcessorLock->release();

    uCurrCluster->uProcessorRemove( *this );
#ifdef __U_MULTI__
    delete uContextEvent;
    delete uContextSwitchHandler;
    delete uEvents;
#endif // __U_MULTI__
    
#if defined( __i386__ ) && defined( __U_MULTI__ )
    uFreeLDT( ldtValue );
#endif // __i386__ && __U_MULTI__
} // uProcessor::~uProcessor


void uProcessor::uFork( uProcessor *np ) {
#ifdef __U_MULTI__
    uKernelModule::uInitialization = false;

#if defined( __linux__ )
    int flag = CLONE_VM | CLONE_FS | CLONE_FILES | SIGCHLD;
    // KTs created by the initial (system) kernel-thread (SKT) already have the
    // SKT as their parent. A KT created as a child of these sub-KTs is marked
    // with CLONE_PARENT so its parent is that of its creator, i.e., the
    // SKT. Hence, the SKT has the shell KT as its parent and all KTs created
    // directly or indirectly from the SKT have the SKT as their parent.
    if ( &uThisProcessor() != uKernelModule::uSystemProcessor ) {
      flag |= CLONE_PARENT;
    } // if

#if defined( __i386__ ) 
    np->uPid = clone( __U_START_KT_NAME__, (char *)(np->uProcessorKer.uBase), flag, np );
#elif defined( __ia64__ )
    np->uPid = __clone2( __U_START_KT_NAME__, (char *)(np->uProcessorKer.uLimit), np->uProcessorKer.uSize, flag, np );
#else
    #error uC++ internal error : unsupported architecture
#endif
    if ( np->uPid == -1 ) {
      uAbort( "(uProcessorKernel &)0x%p.uFork() : internal error, create failed for kernel thread, error(%d) %s.", this, errno, strerror( errno ) );
    } //if

#elif defined( __solaris__ )
    ucontext *ucp;
#if defined( __U_SWAPCONTEXT__ )
    ucp = (ucontext *)(np->uProcessorKer.uStorage);
    // _lwp_makecontext overwrites the existing context except for the signal mask
    // so the signal mask obtained from getcontext during uStartHere is used
#else
    ucontext uc;
    ucp = &uc;
    // make sure the new lwp has an empty signal mask at start
    sigemptyset( &uc.uc_sigmask );
#endif
    _lwp_makecontext( ucp, __U_START_KT_NAME__, np, NULL, (char *)(np->uProcessorKer.uLimit), np->uProcessorKer.uSize );
    int code = _lwp_create( ucp, 0, &np->uPid );
    if ( code != 0 ) {
      uAbort( "(uProcessorKernel &)0x%p.uFork() : internal error, create failed for kernel thread, error(%d) %s.", this, code, strerror( code ) );
    } // if
#elif defined( __irix__ )
    np->uPid = uKernelModule::uSystemProcessor->uProcCreate( np );
#else
    #error uC++ internal error : unsupported architecture
#endif

    uKernelModule::uInitialization = true;

#else

    np->uPid = uPid;

#endif // __U_MULTI__
} // uProcessor::uFork


void uProcessor::uSetContextSwitchEvent( uDuration duration ) {
    uAssert( THREAD_GETMEM( uDisableInt ) && THREAD_GETMEM( uDisableIntCnt ) == 1 );

    if ( ! uContextEvent->uListed() && duration != 0 ) { // first context switch event ?
      uContextEvent->timerT = uActiveProcessorKernel->uKernelClock.uGetTime() + duration;
      uContextEvent->timerD = duration;
      uEvents->uAddEvent( *uContextEvent );
    } else if ( duration > 0 && uContextEvent->timerD != duration ) { // event duration differs from previous ? change it
      uEvents->uRemoveEvent( *uContextEvent );
      uContextEvent->timerT = uActiveProcessorKernel->uKernelClock.uGetTime() + duration;
      uContextEvent->timerD = duration;
      uEvents->uAddEvent( *uContextEvent );
    } else if ( duration == 0 && uContextEvent->timerT != 0 ) { // zero duration and current CS is nonzero ?
      uEvents->uRemoveEvent( *uContextEvent );
      uContextEvent->timerT = 0;   
      uContextEvent->timerD = 0;
    } // if
} // uProcessor::uSetContextSwitchEvent


void uProcessor::uSetContextSwitchEvent( int msecs ) {
    uSetContextSwitchEvent( uDuration( msecs / 1000L, msecs % 1000L * ( TIMEGRAN / 1000L ) ) ); // convert msecs to uDuration type
} // uProcessor::uSetContextSwitchEvent


#ifdef __U_MULTI__
#if defined( __irix__ )
pid_t uProcessor::uProcCreate( uProcessor *np ) {
    return uProcTask->uProcCreate( np );
} // uProcessor::uProcCreate
#endif


#if defined( __irix__ ) ||  defined( __linux__ )
void uProcessor::uProcWait( pid_t pid ) {
    uProcTask->uProcWait( pid );
} // uProcessor::uProcWait
#endif


void uProcessor::uProcExit( int retcode ) {
    uProcTask->uProcExit( retcode );
} // uProcessor::uProcExit
#endif // __U_MULTI__


uClock &uProcessor::uGetClock() const {
    return *uProcessorClock;
} // uProcessor::uGetClock


pid_t uProcessor::uGetPid() const {
    return uPid;
} // uProcessor::uGetPid


uCluster &uProcessor::uGetCluster() const {
    return *uCurrCluster;
} // uProcessor::uGetCluster


uCluster &uProcessor::uSetCluster( uCluster &uClus ) {
  if ( &uClus == &(uThisProcessor().uGetCluster()) ) return uClus; // trivial case

    uCluster &prev = uGetCluster();             // remember the current cluster so it can be returned
    uProcTask->uSetCluster( uClus );                  // operation must be done by the processor itself
    return prev;
} // uProcessor::uSetCluster


uBaseTask &uProcessor::uGetTask() const {
    return *uCurrTask;
} // uProcessor::uGetTask


unsigned int uProcessor::uSetPreemption( unsigned int ms ) {
    int temp = ms;                              // make writable
#ifdef __U_DEBUG__
    if ( ms == 0 ) {                            // 0 => infinity; reset to the minimum preemption for the local debugger
      temp = uMinPreemption;                    // approximate infinity
    } else if ( (int)ms < 0 ) {                 // special case for testing, not for public consumption; ms is unsigned, so compare as int
      temp = 0;
    } // if
#endif // __U_DEBUG__
    int prev = uPreemption;
    uProcTask->uSetPreemption( temp );

    // Asynchronous with regard to other processors, synchronous with regard to
    // this processor.

    if ( &uThisProcessor() == this ) {
      uThisTask().uYield();
    } // if
    return prev;
} // uProcessor::uSetPreemption


unsigned int uProcessor::uGetPreemption() const {
    return uPreemption;
} // uProcessor::uGetPreemption


unsigned int uProcessor::uSetSpin( unsigned int spin ) {
    int prev = uSpin;
    uSpin = spin;
    return prev;
} // uProcessor::uSetSpin


unsigned int uProcessor::uGetSpin() const {
    return uSpin;
} // uProcessor::uGetSpin


bool uProcessor::uIdle() const {
    return uIdleRef.uListed();
} // uProcessor::uIdle


#if defined( __i386__ ) && defined( __U_MULTI__ )
// elements of the bitset are "true" if the corresponding ldt index
// is available for use; initialize all elements to true initially
uBitSet< U_MAX_LDT > uProcessor::uLDTFreeSet;

uOwnerLock uProcessor::uLDTFreeSetLock;
bool uProcessor::uLDTFreeSetInit = false;


int uProcessor::uAllocLDT() {
    if ( uKernelModule::uInitialization ) uLDTFreeSetLock.acquire();

    if ( ! uLDTFreeSetInit ) {
        uLDTFreeSet.setAll();
        uLDTFreeSetInit = true;
    } // if
    
    int idx = uLDTFreeSet.findFirstSet();
    if ( idx == -1 ) {
        uAbort( "uProcessor::uAllocLDT() : internal error, LDT space exhausted." );
    } // if
    uLDTFreeSet.clr( idx );

    if ( uKernelModule::uInitialization ) uLDTFreeSetLock.release();

#ifdef __U_DEBUG_H__
    if ( uKernelModule::uInitialization ) uDebugPrt( "uProcessor::uAllocLDT allocated new ldt entry %d (index %d)\n", idx * 8 + 7, idx );
#endif // __U_DEBUG_H__

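    // An x86 segment selector is (index << 3) | TI | RPL; TI = 4 selects the
    // LDT rather than the GDT and RPL = 3 requests user privilege, hence the
    // value idx * 8 + 7.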
    return idx * 8 + 7;
} // uProcessor::uAllocLDT


void uProcessor::uFreeLDT( int oldLDT ) {
#ifdef __U_DEBUG_H__
    uDebugPrt( "uProcessor::uFreeLDT adding ldt entry %d (index %d) to free set\n", oldLDT, ( oldLDT - 7 ) / 8 );
#endif // __U_DEBUG_H__

    uLDTFreeSetLock.acquire();
    uLDTFreeSet.set( ( oldLDT - 7 ) / 8 );
    uLDTFreeSetLock.release();
} // uProcessor::uFreeLDT
#endif // __i386__ && __U_MULTI__


// Local Variables: //
// compile-command: "gmake install" //
// End: //
