
//                              -*- Mode: C++ -*- 
// 
// uC++ Version 5.0.1, Copyright (C) Peter A. Buhr 1994
// 
// uCluster.cc -- 
// 
// Author           : Peter Buhr
// Created On       : Mon Mar 14 17:34:24 1994
// Last Modified By : Peter A. Buhr
// Last Modified On : Tue Sep  7 11:57:21 2004
// Update Count     : 372
//
// This  library is free  software; you  can redistribute  it and/or  modify it
// under the terms of the GNU Lesser General Public License as published by the
// Free Software  Foundation; either  version 2.1 of  the License, or  (at your
// option) any later version.
// 
// This library is distributed in the  hope that it will be useful, but WITHOUT
// ANY  WARRANTY;  without even  the  implied  warranty  of MERCHANTABILITY  or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License
// for more details.
// 
// You should  have received a  copy of the  GNU Lesser General  Public License
// along  with this library.
// 


#define __U_KERNEL__


#include <uC++.h>
#include <uAssert.h>
//#include <uDebug.h>

#if defined( __solaris__ )
#include <sys/lwp.h>                            // needed for _lwp_kill
#endif

extern uCluster &uThisCluster() {
    return *THREAD_GETMEM( uActiveCluster );
} // uThisCluster
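
// A minimal usage sketch (hypothetical, not part of this translation unit):
// any task can query the cluster it is currently executing on, e.g., to decide
// whether work must be moved to another cluster:
//
//   uCluster &here = uThisCluster();          // cluster of the executing task
//   if ( &here != &destination ) {            // "destination" is a hypothetical uCluster
//       // redirect or migrate the work
//   } // if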


//######################### uClusterDL #########################


uClusterDL::uClusterDL( uCluster &w ) : uWho( w ) {}
uCluster &uClusterDL::uGet() const { return uWho; }


//######################### uCluster #########################


void *uCluster::operator new( size_t, void *storage ) {
    return storage;
} // uCluster::operator new

void *uCluster::operator new( size_t size ) {
    return ::operator new( size );
} // uCluster::operator new


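// Wake a (probably sleeping) processor by sending it a SIGALRM. On Solaris a
// processor corresponds to a lightweight process (LWP), hence _lwp_kill;
// elsewhere the processor is addressed by its UNIX process id via kill(2).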
void uCluster::uWakeProcessor( int uPid ) {
#ifdef __U_DEBUG_H__
    uDebugPrt( "(uCluster &)0x%p.uWakeProcessor: waking processor %d\n", this, uPid );
#endif // __U_DEBUG_H__

#if defined( __solaris__ )
    _lwp_kill( uPid, SIGALRM );
#else
    kill( uPid, SIGALRM );                      // wake it up, as it is probably sleeping
#endif
} // uCluster::uWakeProcessor


void uCluster::uProcessorPause() {
    uAssert( THREAD_GETMEM( uDisableInt ) && THREAD_GETMEM( uDisableIntCnt ) > 0 );

    // Check the ready queue to make sure that no task managed to slip onto the
    // queue since the processor last checked.

    uReadyIdleTaskLock.acquire();
    if ( uReadyTasksEmpty() && uThisProcessor().uExternal.uEmpty() ) {
      // stop generating SIGALRM signals on this processor until woken up

      uThisProcessor().uSetContextSwitchEvent( 0 );   // turn off context-switching

      // Block any SIGALRM signals from arriving.
      sigset_t new_mask, mask;
      sigemptyset( &new_mask );
      sigaddset( &new_mask, SIGALRM );
      if ( sigprocmask( SIG_BLOCK, &new_mask, &mask ) == -1 ) {
          uAbort( "internal error, sigprocmask" );
      } // if

      if ( THREAD_GETMEM( uInKernelRF ) ) {    // in kernel roll-forward flag on ?
          uReadyIdleTaskLock.release();
          if ( sigprocmask( SIG_SETMASK, &mask, NULL ) == -1 ) {
            uAbort( "internal error, sigprocmask" );
          } // if
          THREAD_GETMEM( uSelf )->uRollForward( true ); // make sure to do chores
      } else {
          uIdleProcessors.uAddTail( &(uThisProcessor().uIdleRef) );
          uReadyIdleTaskLock.release();

          // Install the old signal mask and wait for a signal to arrive.

#ifdef __U_DEBUG_H__
          uDebugPrt( "(uCluster &)0x%p.uProcessorPause, before sigpause\n", this );
#endif // __U_DEBUG_H__
          sigsuspend( &mask );
          if ( sigprocmask( SIG_SETMASK, &mask, NULL ) == -1 ) {
            uAbort( "internal error, sigprocmask" );
          } // if
#ifdef __U_DEBUG_H__
          uDebugPrt( "(uCluster &)0x%p.uProcessorPause, after sigpause\n", this );
#endif // __U_DEBUG_H__

          // A UNIX process may be woken by any signal, e.g. SIGCHLD, so it is
          // necessary to check and remove the processor from the idle queue.
          // Normally a processor is removed in uMakeTaskReady.

          if ( uThisProcessor().uIdle() ) {
            uReadyIdleTaskLock.acquire();
            if ( uThisProcessor().uIdle() ) {
                uIdleProcessors.uRemove( &(uThisProcessor().uIdleRef) );
            } // if
            uReadyIdleTaskLock.release();
          } // if

          // Just woken up after an alarm but in kernel/library code, so no
          // actual popping from the event list took place in the SIGALRM
          // handler (i.e., the handler just ignored the interrupt).
          // Therefore, do a roll-forward to ensure that any necessary events
          // are popped from the event list.

          THREAD_GETMEM( uSelf )->uRollForward( true );
      } // if

      // Reset the context-switch event.

#ifdef __U_DEBUG_H__
      uDebugPrt( "(uCluster &)0x%p.uProcessorPause, reset timeslice:%d\n", this, uThisProcessor().uGetPreemption() );
#endif // __U_DEBUG_H__
      uThisProcessor().uSetContextSwitchEvent( uThisProcessor().uGetPreemption() );
    } else {
      uReadyIdleTaskLock.release();
    } // if

    uAssert( THREAD_GETMEM( uDisableInt ) && THREAD_GETMEM( uDisableIntCnt ) > 0 );
} // uCluster::uProcessorPause
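
// The pause above follows the standard race-free POSIX idle-wait idiom: block
// the wakeup signal, re-check the wait condition, and only then atomically
// restore the old mask and sleep. A reduced sketch of the idiom (plain POSIX,
// outside the uC++ kernel; nothingToDo() is a hypothetical predicate):
//
//   sigset_t block, old;
//   sigemptyset( &block );
//   sigaddset( &block, SIGALRM );
//   sigprocmask( SIG_BLOCK, &block, &old );   // 1. block the wakeup signal
//   if ( nothingToDo() ) {                    // 2. re-check the condition
//       sigsuspend( &old );                   // 3. atomically unblock and wait
//   } // if
//   sigprocmask( SIG_SETMASK, &old, NULL );   // 4. restore the original mask
//
// Without step 1, a SIGALRM arriving between the check and the sleep would be
// lost and the processor could sleep until the next unrelated signal.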


void uCluster::uMakeProcessorIdle( uProcessor &p ) {
    uReadyIdleTaskLock.acquire();
    uIdleProcessors.uAddTail( &(p.uIdleRef) );
    uReadyIdleTaskLock.release();
} // uCluster::uMakeProcessorIdle


void uCluster::uMakeProcessorActive( uProcessor &p ) {
    uReadyIdleTaskLock.acquire();
    if ( p.uIdle() ) {                          // processor on idle queue ?
      uIdleProcessors.uRemove( &(p.uIdleRef) );
    } // if
    uReadyIdleTaskLock.release();
} // uCluster::uMakeProcessorActive


void uCluster::uMakeProcessorActive() {
    uReadyIdleTaskLock.acquire();
    if ( ! uReadyTasks->uEmpty() && ! uIdleProcessors.uEmpty() ) {
      int uPid = uIdleProcessors.uDropHead()->uGet().uPid;
      uReadyIdleTaskLock.release();             // don't hold lock while sending SIGALRM
      uWakeProcessor( uPid );
    } else {
      uReadyIdleTaskLock.release();
    } // if
} // uCluster::uMakeProcessorActive


bool uCluster::uReadyTasksEmpty() {
    return uReadyTasks->uEmpty();
} // uCluster::uReadyTasksEmpty


void uCluster::uMakeTaskReady( uBaseTask &uReadyTask ) {
    uReadyIdleTaskLock.acquire();
    if ( &uReadyTask.uBound != NULL ) {               // task bound to a specific processor ?
#ifdef __U_DEBUG_H__
      uDebugPrt( "(uCluster &)0x%p.uMakeTaskReady(1): task %.256s (0x%p) makes task %.256s (0x%p) ready\n",
              this, uThisTask().uGetName(), &uThisTask(), uReadyTask.uGetName(), &uReadyTask );
#endif // __U_DEBUG_H__
      uProcessor *p = &uReadyTask.uBound;       // optimization
      p->uExternal.uAddTail( &(uReadyTask.uReadyRef) ); // add task to end of special ready queue
#ifdef __U_MULTI__
      if ( p->uIdle() ) {                       // processor on idle queue ?
          uIdleProcessors.uRemove( &(p->uIdleRef) );
          int uPid = p->uPid;
          uReadyIdleTaskLock.release();         // don't hold lock while sending SIGALRM
          uWakeProcessor( uPid );
      } else {
          uReadyIdleTaskLock.release();
      } // if
#else
      uReadyIdleTaskLock.release();
#endif // __U_MULTI__
    } else {
#ifdef __U_DEBUG_H__
      uDebugPrt( "(uCluster &)0x%p.uMakeTaskReady(2): task %.256s (0x%p) makes task %.256s (0x%p) ready\n",
               this, uThisTask().uGetName(), &uThisTask(), uReadyTask.uGetName(), &uReadyTask );
#endif // __U_DEBUG_H__
#ifdef __U_MULTI__
      // Wake up an idle processor if the ready task is migrating to another
      // cluster with idle processors or if the ready task is on the same
      // cluster but the ready queue of that cluster is not empty. This check
      // prevents a single yielding task on a cluster from unnecessarily
      // waking up a processor that has no work to do.

      if ( ! uIdleProcessors.uEmpty() && ( &uThisCluster() != this || ! uReadyTasks->uEmpty() ) ) {
          uReadyTasks->uAdd( &(uReadyTask.uReadyRef) ); // add task to end of cluster ready queue
          int uPid = uIdleProcessors.uDropHead()->uGet().uPid;
          uReadyIdleTaskLock.release();         // don't hold lock while sending SIGALRM
          uWakeProcessor( uPid );
      } else {
          uReadyTasks->uAdd( &(uReadyTask.uReadyRef) ); // add task to end of cluster ready queue
          uReadyIdleTaskLock.release();
      } // if
#else
      uReadyTasks->uAdd( &(uReadyTask.uReadyRef) );   // add task to end of cluster ready queue
      uReadyIdleTaskLock.release();
#endif // __U_MULTI__
    } // if
} // uCluster::uMakeTaskReady
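
// Example of the check above: a lone task on a cluster that yields is re-added
// to the cluster's ready queue, but because the queue was empty at the test
// (the yielding task is still running), no idle processor is woken; the task
// is simply rescheduled on the current processor.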


uBaseTask &uCluster::uReadyTaskTryRemove() {
    // Select a task from the ready queue of this cluster; if there are no
    // ready tasks, return a nil reference.

    uBaseTask *t;

    uReadyIdleTaskLock.acquire();
    if ( ! uReadyTasksEmpty() ) {
      t = &(uReadyTasks->uDrop()->uGet());
    } else {
      t = NULL;
    } // if
    uReadyIdleTaskLock.release();
    return *t;
} // uCluster::uReadyTaskTryRemove
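
// Because the "nil" result is a dereferenced null pointer, callers must test
// the returned reference by address, in the same style as the uBound checks
// above. A hypothetical caller inside the kernel:
//
//   uBaseTask &t = cluster.uReadyTaskTryRemove();
//   if ( &t == NULL ) {                       // no ready task available ?
//       // pause or spin until work arrives
//   } // if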


void uCluster::uTaskAdd( uBaseTask &t ) {
    uReadyIdleTaskLock.acquire();
    uTasksOnCluster.uAddTail( &(t.uClusterRef) );
    if ( &t.uBound == NULL ) uReadyTasks->uAddInitialize( uTasksOnCluster ); // processor task is not part of normal initialization
    uReadyIdleTaskLock.release();
} // uCluster::uTaskAdd


void uCluster::uTaskRemove( uBaseTask &t ) {
    uReadyIdleTaskLock.acquire();
    uTasksOnCluster.uRemove( &(t.uClusterRef) );
    if ( &t.uBound == NULL ) uReadyTasks->uRemoveInitialize( uTasksOnCluster ); // processor task is not part of normal initialization
    uReadyIdleTaskLock.release();
} // uCluster::uTaskRemove


void uCluster::uTaskReschedule( uBaseTask &t ) {
    uReadyIdleTaskLock.acquire();
    uReadyTasks->uRescheduleTask( &(t.uClusterRef), uTasksOnCluster );
    uReadyIdleTaskLock.release();
} // uCluster::uTaskReschedule


void uCluster::uTaskResetPriority( uBaseTask &owner, uBaseTask &calling ) { // TEMPORARY
#ifdef __U_DEBUG_H__
    uDebugPrt( "(uCluster &)0x%p.uTaskResetPriority, owner:0x%p, calling:0x%p, owner's cluster:0x%p\n", this, &owner, &calling, owner.uCurrCluster );
#endif // __U_DEBUG_H__
    uReadyIdleTaskLock.acquire();
    if ( &uThisCluster() == owner.uCurrCluster ) {
      if ( uReadyTasks->uCheckPriority( owner.uReadyRef, calling.uReadyRef ) ) {
          uReadyTasks->uResetPriority( owner.uReadyRef, calling.uReadyRef );
      } // if
    } // if
    uReadyIdleTaskLock.release();
} // uCluster::uTaskResetPriority


void uCluster::uTaskSetPriority( uBaseTask &owner, uBaseTask &calling ) {
#ifdef __U_DEBUG_H__
    uDebugPrt( "(uCluster &)0x%p.uTaskSetPriority, owner:0x%p, calling:0x%p, owner's cluster:0x%p\n", this, &owner, &calling, owner.uCurrCluster );
#endif // __U_DEBUG_H__
    uReadyIdleTaskLock.acquire();
    uReadyTasks->uResetPriority( owner.uReadyRef, calling.uReadyRef );
    uReadyIdleTaskLock.release();
} // uCluster::uTaskSetPriority


void uCluster::uProcessorAdd( uProcessor &p ) {
    uProcessorsOnClusterLock.acquire();
    uProcessorsOnCluster.uAddTail( &(p.uProcessorRef) );
    uProcessorsOnClusterLock.release();
} // uCluster::uProcessorAdd


void uCluster::uProcessorRemove( uProcessor &p ) {
    uProcessorsOnClusterLock.acquire();
    uProcessorsOnCluster.uRemove( &(p.uProcessorRef) );
    uProcessorsOnClusterLock.release();
} // uCluster::uProcessorRemove


void uCluster::uCreateCluster( unsigned int stacksize, const char *name ) {
#ifdef __U_DEBUG_H__
    uDebugPrt( "(uCluster &)0x%p.uCreateCluster\n", this );
#endif // __U_DEBUG_H__

#ifdef __U_DEBUG__
#ifdef __U_MULTI__
    uDebugIgnore = false;
#else
    uDebugIgnore = true;
#endif // __U_MULTI__
#endif // __U_DEBUG__

    uSetName( name );
    uSetStackSize( stacksize );

#if __U_LOCALDEBUGGER_H__
    if ( uLocalDebugger::uLocalDebuggerActive ) uLocalDebugger::uLocalDebuggerInstance->checkPoint();
#endif // __U_LOCALDEBUGGER_H__

    uKernelModule::uGlobalClusterLock->acquire();
    uKernelModule::uGlobalClusters->uAddTail( &uGlobalRef );
    uKernelModule::uGlobalClusterLock->release();

#if __U_LOCALDEBUGGER_H__
    if ( uLocalDebugger::uLocalDebuggerActive ) uLocalDebugger::uLocalDebuggerInstance->createCluster( *this );
#endif // __U_LOCALDEBUGGER_H__

    if ( uReadyTasks == NULL ) {
      uReadyTasks = new uDefaultScheduler;
      uDefaultReadyTasks = true;
    } else {
      uDefaultReadyTasks = false;
    } // if

#ifdef __U_MULTI__
    NBIO = new uNBIO;
#endif // __U_MULTI__
} // uCluster::uCreateCluster


uCluster::uCluster( unsigned int stacksize, const char *name ) : uGlobalRef( *this ), uReadyTasks( NULL ) {
    uCreateCluster( stacksize, name );
} // uCluster::uCluster


uCluster::uCluster( const char *name ) : uGlobalRef( *this ), uReadyTasks( NULL ) {
    uCreateCluster( uDefaultStackSize(), name );
} // uCluster::uCluster


uCluster::uCluster( uBaseSchedule<uBaseTaskDL> &ReadyQueue, unsigned int stacksize, const char *name ) : uGlobalRef( *this ), uReadyTasks( &ReadyQueue ) {
    uCreateCluster( stacksize, name );
} // uCluster::uCluster


uCluster::uCluster( uBaseSchedule<uBaseTaskDL> &ReadyQueue, const char *name ) : uGlobalRef( *this ), uReadyTasks( &ReadyQueue ) {
    uCreateCluster( uDefaultStackSize(), name );
} // uCluster::uCluster
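
// A minimal construction sketch (hypothetical; MyScheduler is an assumed
// subtype of uBaseSchedule<uBaseTaskDL>):
//
//   uCluster c1( "default" );                 // default scheduler, default stack size
//   uCluster c2( 64 * 1024, "big" );          // default scheduler, 64K default task stacks
//   MyScheduler rq;
//   uCluster c3( rq, "custom" );              // user scheduler; the cluster does not delete it
//
// The last point follows from uDefaultReadyTasks: only a scheduler allocated
// by uCreateCluster is deleted in the destructor.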


const int uCluster::uReadSelect = 1;
const int uCluster::uWriteSelect = 2;
const int uCluster::uExceptSelect = 4;


int uCluster::uSelect( int fd, int rwe, timeval *timeout ) {
    return NBIO->uSelect( fd, rwe, timeout );
} // uCluster::uSelect


int uCluster::uSelect( fd_set *rfd, fd_set *wfd, fd_set *efd, timeval *timeout ) {
    return NBIO->uSelect( FD_SETSIZE, rfd, wfd, efd, timeout );
} // uCluster::uSelect


int uCluster::uSelect( int nfds, fd_set *rfd, fd_set *wfd, fd_set *efd, timeval *timeout ) {
    return NBIO->uSelect( nfds, rfd, wfd, efd, timeout );
} // uCluster::uSelect
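
// A minimal usage sketch of the single-fd form (hypothetical caller); rwe is a
// bit-wise OR of the select constants defined above:
//
//   timeval t = { 5, 0 };                     // wait at most 5 seconds
//   int n = uThisCluster().uSelect( fd, uCluster::uReadSelect | uCluster::uExceptSelect, &t );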


uCluster::~uCluster() {
#ifdef __U_DEBUG_H__
    uDebugPrt( "(uCluster &)0x%p.~uCluster\n", this );
#endif // __U_DEBUG_H__

#ifdef __U_MULTI__
    delete NBIO;
#endif // __U_MULTI__

#ifdef __U_DEBUG__
    // Must check for processors before tasks because each processor has a
    // processor task, and hence, there is always a task on the cluster.
    uProcessorDL *pr;
    uProcessorsOnClusterLock.acquire();
    for ( uSeqGen<uProcessorDL> gen2(uProcessorsOnCluster); gen2 >> pr; ) {
      uAbort( ": attempt to delete cluster %.256s (0x%p) with processor 0x%p still on it.\n"
            "Possible cause is the processor has not been deleted.",
            uGetName(), this, &(pr->uGet()) );
    } // for
    uProcessorsOnClusterLock.release();

    uBaseTaskDL *tr;
    uReadyIdleTaskLock.acquire();
    for ( uSeqGen<uBaseTaskDL> gen1(uTasksOnCluster); gen1 >> tr; ) {
      uAbort( ": attempt to delete cluster %.256s (0x%p) with task %.256s (0x%p) still on it.\n"
            "Possible cause is the task has not been deleted.",
            uGetName(), this, tr->uGet().uGetName(), &(tr->uGet()) );
    } // for
    uReadyIdleTaskLock.release();
#endif // __U_DEBUG__

    if ( uDefaultReadyTasks ) {                       // delete if cluster allocated it
      delete uReadyTasks;
    } // if

#if __U_LOCALDEBUGGER_H__
    if ( uLocalDebugger::uLocalDebuggerActive ) uLocalDebugger::uLocalDebuggerInstance->destroyCluster( *this );
#endif // __U_LOCALDEBUGGER_H__

    uKernelModule::uGlobalClusterLock->acquire();
    uKernelModule::uGlobalClusters->uRemove( &uGlobalRef );
    uKernelModule::uGlobalClusterLock->release();
} // uCluster::~uCluster

const char *uCluster::uSetName( const char *name ) {
    const char *prev = uName;
    uName = name;
    return prev;
} // uCluster::uSetName

const char *uCluster::uGetName() const {
    return
#ifdef __U_DEBUG__
      ( uName == NULL || uName == (const char *)-1 ) ? "*unknown*" : // storage might be scrubbed
#endif // __U_DEBUG__
      uName;
} // uCluster::uGetName

unsigned int uCluster::uSetStackSize( unsigned int stacksize ) {
    unsigned int prev = uStackSize;
    uStackSize = stacksize;
    return prev;
} // uCluster::uSetStackSize

unsigned int uCluster::uGetStackSize() const {
    return uStackSize;
} // uCluster::uGetStackSize

void uCluster::uCloseFD( int fd ) {
    NBIO->uCloseFD(fd);
} // uCluster::uCloseFD


// Local Variables: //
// compile-command: "gmake install" //
// End: //
