//                              -*- Mode: C++ -*- 
// 
// uC++ Version 5.0.1, Copyright (C) Peter A. Buhr 1996
// 
// uBaseTask.cc -- 
// 
// Author           : Peter A. Buhr
// Created On       : Mon Jan  8 16:14:20 1996
// Last Modified By : Peter A. Buhr
// Last Modified On : Thu Sep  2 13:40:45 2004
// Update Count     : 191
//
// This  library is free  software; you  can redistribute  it and/or  modify it
// under the terms of the GNU Lesser General Public License as published by the
// Free Software  Foundation; either  version 2.1 of  the License, or  (at your
// option) any later version.
// 
// This library is distributed in the  hope that it will be useful, but WITHOUT
// ANY  WARRANTY;  without even  the  implied  warranty  of MERCHANTABILITY  or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License
// for more details.
// 
// You should  have received a  copy of the  GNU Lesser General  Public License
// along  with this library.
// 

#define __U_KERNEL__
#define __U_PROFILE__
#define __U_PROFILEABLE_ONLY__


#include <uC++.h>
#include <uProfiler.h>
//#include <uDebug.h>


//######################### uBaseTaskDL #########################


uBaseTaskDL::uBaseTaskDL( uBaseTask &w ) : uWho( w ) {}
uBaseTask &uBaseTaskDL::uGet() const { return uWho; }


//######################### uBaseTask #########################


void uBaseTask::uCreateTask( uCluster &uClus ) {
#ifdef __U_DEBUG__
    uCurrSerialOwner = this;
    uCurrSerialCount = 1;
    uCurrSerialLevel = 0;
#endif // __U_DEBUG__
    uState = uStart;
    uRecursion = uMutexRecursion = 0;
    uCurrCluster = &uClus;                      // remember the cluster task is created on
    uCurrCoroutine = this;                      // the first coroutine that a task executes is itself
    uAcceptedCall = NULL;                               // no accepted mutex entry yet
    uPriority = uActivePriority = 0;
    uInheritTask = this;
    uCurrentSerial = NULL;                      // remember outermost serial called; temporarily NULL as no serial instance is accessible yet

    // exception handling

    uTerminateRtn = uEHM::uTerminate;                 // initialize default terminate routine

    // profiling

    uProfileActive = false;                     // can be read before uTaskConstructor is called

    // debugging
#if __U_LOCALDEBUGGER_H__
    DebugPCandSRR = NULL;
    uProcessBP = false;                         // used to prevent triggering breakpoint while processing one
#endif // __U_LOCALDEBUGGER_H__

    // pthreads

    pthreadData = NULL;
} // uBaseTask::uCreateTask


uBaseTask::uBaseTask( uCluster &uClus, uProcessor &uProc ) : uBaseCoroutine( uClus.uGetStackSize() ), uClusterRef( *this ), uReadyRef( *this ), uEntryRef( *this ), uMutexRef( *this ), uBound( uProc ) {
    uCreateTask( uClus );
} // uBaseTask::uBaseTask


void uBaseTask::uSetState( uBaseTask::uTaskState state ) {
    if ( uProfileActive && uProfiler::uProfiler_RegisterTaskExecState ) { 
      (*uProfiler::uProfiler_RegisterTaskExecState)( uProfiler::uProfilerInstance, *this, state ); 
    } // if

    uState = state;
} // uBaseTask::uSetState


void uBaseTask::uWake() {
    uSetState( uReady );                        // task is marked available for execution
    uCurrCluster->uMakeTaskReady( *this );            // put the task on the ready queue of the cluster
} // uBaseTask::uWake


uBaseTask &uBaseTask::uGetInheritTask() {
    return *uInheritTask;
} // uBaseTask::uGetInheritTask


int uBaseTask::uSetActivePriority( uBaseTask &t ) {
    int temp = uActivePriority;
    uInheritTask = &t;
    uActivePriority = uInheritTask->uGetActivePriority();
    return temp;
} // uBaseTask::uSetActivePriority


int uBaseTask::uSetActivePriority( int p ) {
    int temp = uActivePriority;
    uActivePriority = p;
    return temp;
} // uBaseTask::uSetActivePriority
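
// The two uSetActivePriority members form a save/restore pair used for
// priority inheritance: the reference overload records the donor task and
// copies its active priority, returning the old value so it can later be
// restored through the int overload. Illustrative (hypothetical) sketch, not
// taken from this file; "owner" and "caller" are assumed task references:
//
//     int saved = owner.uSetActivePriority( caller );   // owner inherits caller's active priority
//     ...                                               // owner executes on the caller's behalf
//     owner.uSetActivePriority( saved );                // restore owner's previous active priority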


int uBaseTask::uSetBasePriority( int p ) {
    int temp = uPriority;
    uPriority = p;
    return temp;
} // uBaseTask::uSetBasePriority


int uBaseTask::uSetActiveQueue( int q ) {
    int temp = uActiveQueueIndex;
    // uInheritTask = &t;  is this needed or should this just be called from uSetActivePriority ??
    uActiveQueueIndex = q;
    return temp;
} // uBaseTask::uSetActiveQueue


int uBaseTask::uSetBaseQueue( int q ) {
    int temp = uQueueIndex;
    uQueueIndex = q;
    return temp;
} // uBaseTask::uSetBaseQueue


uSerial &uBaseTask::uSetSerial( uSerial &s ) {
    uSerial *temp = uCurrentSerial;
    uCurrentSerial = &s;
    return *temp;
} // uBaseTask::uSetSerial


uSerial *uBaseTask::uGetSerial() const {
    return uCurrentSerial;
} // uBaseTask::uGetSerial


uBaseTask::uBaseTask() : uClusterRef( *this ), uReadyRef( *this ), uEntryRef( *this ), uMutexRef( *this ), uBound( *(uProcessor *)0 ) {
    uCreateTask( uThisCluster() );
} // uBaseTask::uBaseTask


uBaseTask::uBaseTask( unsigned int stacksize ) : uBaseCoroutine ( stacksize ), uClusterRef( *this ), uReadyRef( *this ), uEntryRef( *this ), uMutexRef( *this ), uBound( *(uProcessor *)0 ) {
    uCreateTask( uThisCluster() );
} // uBaseTask::uBaseTask


uBaseTask::uBaseTask( uCluster &uClus ) : uBaseCoroutine( uClus.uGetStackSize() ), uClusterRef( *this ), uReadyRef( *this ), uEntryRef( *this ), uMutexRef( *this ), uBound( *(uProcessor *)0 ) {
    uCreateTask( uClus );
} // uBaseTask::uBaseTask


uBaseTask::uBaseTask( uCluster &uClus, unsigned int stacksize ) : uBaseCoroutine( stacksize ), uClusterRef( *this ), uReadyRef( *this ), uEntryRef( *this ), uMutexRef( *this ), uBound( *(uProcessor *)0 ) {
    uCreateTask( uClus );
} // uBaseTask::uBaseTask


uBaseTask::~uBaseTask() {
} // uBaseTask::~uBaseTask


uCluster &uBaseTask::uGetCluster() const {
    return *uCurrCluster;
} // uBaseTask::uGetCluster


int uBaseTask::uGetActivePriority() const {
    // special case for base of active priority stack
    return ( (uBaseTask *)this == uInheritTask ) ? uPriority : uActivePriority;
} // uBaseTask::uGetActivePriority


int uBaseTask::uGetActivePriorityValue() const {      // TEMPORARY: replace previous member?
    return uActivePriority;
} // uBaseTask::uGetActivePriorityValue


int uBaseTask::uGetBasePriority() const {
    return uPriority;
} // uBaseTask::uGetBasePriority


int uBaseTask::uGetActiveQueueValue() const {         // TEMPORARY: rename
    return uActiveQueueIndex;
} // uBaseTask::uGetActiveQueueValue


int uBaseTask::uGetBaseQueue() const {
    return uQueueIndex;
} // uBaseTask::uGetBaseQueue


uCluster &uBaseTask::uMigrate( uCluster &uClus ) {
#ifdef __U_DEBUG_H__
    uDebugPrt( "(uBaseTask &)0x%p.uMigrate, from cluster:0x%p to cluster:0x%p\n", this, &uThisCluster(), &uClus );
#endif // __U_DEBUG_H__

    uAssert( &uBound == NULL );                       // only an unbound task (no dedicated processor) may migrate

#ifdef __U_DEBUG__
    if ( this != &uThisTask() ) {
      uAbort( ": attempt to migrate task %.256s (0x%p) to cluster %.256s (0x%p).\n"
            "A task may only migrate itself to another cluster.",
            uGetName(), this, uClus.uGetName(), &uClus );
    } // if
#endif // __U_DEBUG__

    // A simple optimization: migrating to the same cluster that the task is
    // currently executing on simply returns the value of the current cluster.
    // Therefore, migrate does not always produce a context switch.

  if ( &uClus == uCurrCluster ) return uClus;

#if __U_LOCALDEBUGGER_H__
    if ( uLocalDebugger::uLocalDebuggerActive ) uLocalDebugger::uLocalDebuggerInstance->checkPoint();
#endif // __U_LOCALDEBUGGER_H__

    // Remove the task from the list of tasks that live on this cluster,
    // and add it to the list of tasks that live on the new cluster.

    uCluster &uPrev = *uCurrCluster;                  // save for return

    if ( uProfileActive && uProfiler::uProfiler_RegisterTaskMigrate ) { // task registered for profiling ?              
      (*uProfiler::uProfiler_RegisterTaskMigrate)( uProfiler::uProfilerInstance, *this, uPrev, uClus );
    } // if

    // Interrupts are disabled because once the task is removed from a cluster
    // it is dangerous for it to be placed back on that cluster during an
    // interrupt.  Therefore, interrupts are disabled until the task is on its
    // new cluster.

    THREAD_GETMEM( uSelf )->uDisableInterrupts();

    uPrev.uTaskRemove( *this );                       // remove from current cluster
    uCurrCluster = &uClus;                      // change task's notion of which cluster it is executing on
    uClus.uTaskAdd( *this );                    // add to new cluster

    THREAD_GETMEM( uSelf )->uEnableInterrupts();

#if __U_LOCALDEBUGGER_H__
    if ( uLocalDebugger::uLocalDebuggerActive ) uLocalDebugger::uLocalDebuggerInstance->migrateULThread( uClus );
#endif // __U_LOCALDEBUGGER_H__

    // Force a context switch so the task is scheduled on the new cluster.

    uYield();

    return uPrev;                         // return reference to previous cluster
} // uBaseTask::uMigrate
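
// Usage sketch (illustrative, not part of the original source): a task may
// only migrate itself, and uMigrate returns the previous cluster so the task
// can later move back. "workCluster" is a hypothetical cluster; the
// name-taking constructor is an assumption about uCluster's interface.
//
//     uCluster workCluster( "worker" );                     // assumed: cluster constructed with a name
//     uCluster &prev = uThisTask().uMigrate( workCluster ); // calling task now executes on workCluster
//     ...                                                   // work performed on the new cluster
//     uThisTask().uMigrate( prev );                         // migrate back to the original cluster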


void uBaseTask::uYieldNoPoll() {
    uAssert( ! THREAD_GETMEM( uDisableIntSpin ) );
#ifdef __U_DEBUG__
    if ( this != &uThisTask() ) {
      uAbort( ": attempt to yield the execution of task %.256s (0x%p) by task %.256s (0x%p).\n"
            "A task may only yield itself.",
            uGetName(), this, uThisTask().uGetName(), &uThisTask() );
    } // if
#endif // __U_DEBUG__

    uSCHEDULE( this );                          // find someone else to execute; wake on kernel stack
} // uBaseTask::uYieldNoPoll


void uBaseTask::uYield( unsigned int times ) {
    for ( ; times > 0 ; times -= 1 ) {
      uYield();
    } // for
} // uBaseTask::uYield


void uBaseTask::uYieldYield( unsigned int times ) {   // inserted by translator for -yield
    // Calls to uYieldYield can be inserted in any inlined routine, which can
    // then be called from a uWhen clause, resulting in an attempt to context
    // switch while holding a spin lock. To preserve assert checking for normal
    // uses of uYield, this check cannot be inserted in uYield itself.

    if ( ! THREAD_GETMEM( uDisableIntSpin ) ) {
      uYield( times );
    } // if
} // uBaseTask::uYieldYield
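
// Conceptual illustration (an assumption about the -yield transformation, not
// verbatim translator output): compiling with -yield causes the translator to
// insert calls such as
//
//     uThisTask().uYieldYield( 1 );
//
// into generated code to force extra context switches for testing; the
// spin-lock check above keeps such inserted calls safe when they end up
// inside a uWhen expression.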


void uBaseTask::uYieldInvoluntary() {
    uAssert( ! THREAD_GETMEM( uDisableIntSpin ) );
#ifdef __U_DEBUG__
    if ( this != &uThisTask() ) {
      uAbort( ": attempt to yield the execution of task %.256s (0x%p) by task %.256s (0x%p).\n"
            "A task may only yield itself.",
            uGetName(), this, uThisTask().uGetName(), &uThisTask() );
    } // if
#endif // __U_DEBUG__

    // Are the uC++ kernel memory allocation hooks active?
    if ( uProfileActive && uProfiler::uProfiler_PreallocateMetricMemory ) {
      // create a preallocated memory array on the stack
      void *ptrs[U_MAX_METRICS];

      (*uProfiler::uProfiler_PreallocateMetricMemory)( uProfiler::uProfilerInstance, ptrs, *this );

      THREAD_GETMEM( uSelf )->uDisableInterrupts();
      (*uProfiler::uProfiler_SetMetricMemoryPointers)( uProfiler::uProfilerInstance, ptrs, *this ); // force task to use local memory array
      uActiveProcessorKernel->uSchedule( this );      // find someone else to execute; wake on kernel stack
      (*uProfiler::uProfiler_ResetMetricMemoryPointers)( uProfiler::uProfilerInstance, *this );     // reset task to use its native memory array
      THREAD_GETMEM( uSelf )->uEnableInterrupts();

      // free any blocks of memory not used by metrics
      for ( int metric = 0; metric < uProfiler::uProfilerInstance->numMemoryMetrics; metric += 1 ) {
          if ( ptrs[metric] ) {
            free( ptrs[metric] );
          } // if
      } // for
    } else {
      THREAD_GETMEM( uSelf )->uDisableInterrupts();
      uActiveProcessorKernel->uSchedule( this );      // find someone else to execute; wake on kernel stack
      THREAD_GETMEM( uSelf )->uEnableInterrupts();
    } // if
} // uBaseTask::uYieldInvoluntary


void uBaseTask::uSleep( uTime time ) {
#ifdef __U_DEBUG__
    if ( this != &uThisTask() ) {
      uAbort( ": attempt to put task %.256s (0x%p) to sleep.",
            uGetName(), this );
    } // if
#endif // __U_DEBUG__

  if ( time <= uActiveProcessorKernel->uKernelClock.uGetTime() ) return;

    uWakeupHndlr handler( *this );              // handler to wake up blocking task
    uEventNode uRTEvent( *this, handler, time );      // event node for event list
    uThisProcessor().uEvents->uAddEvent( uRTEvent, uThisProcessor(), true );
} // uBaseTask::uSleep


void uBaseTask::uSleep( uDuration duration ) {
    uSleep( uActiveProcessorKernel->uKernelClock.uGetTime() + duration );
} // uBaseTask::uSleep
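
// Usage sketch (illustrative only; treating uDuration( 5 ) as a 5-second
// duration is an assumption about its constructor): a task blocks itself for
// a relative duration, which the overload above converts to an absolute
// wake-up time on the kernel clock.
//
//     uThisTask().uSleep( uDuration( 5 ) );   // calling task blocks for roughly 5 seconds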


void uBaseTask::uProfileActivate( uBaseTask &t ) {
    if ( ! uProfileSamplerInstance ) {                // not yet registered for profiling ?
      if ( uProfiler::uProfiler_RegisterTask ) {      // task registered for profiling ? 
          (*uProfiler::uProfiler_RegisterTask)( uProfiler::uProfilerInstance, *this, *serial, t );
          uProfileActive = true;
      } // if
    } else {
      uProfileActive = true;
    } // if 
} // uBaseTask::uProfileActivate


void uBaseTask::uProfileActivate() {
    uProfileActivate( *(uBaseTask *)0 );
} // uBaseTask::uProfileActivate


void uBaseTask::uProfileInactivate() {
    uProfileActive = false;
} // uBaseTask::uProfileInactivate


void uBaseTask::uPrintCallStack() const {
    if ( uProfileSamplerInstance ) {
      (*uProfiler::uProfiler_printCallStack)( uProfileSamplerInstance );
    } // if
} // uBaseTask::uPrintCallStack


// Local Variables: //
// compile-command: "gmake install" //
// End: //
