
// CSpace.h
//
// Author David Barrett-Lennard  
// (C)opyright Cedanet Pty Ltd 2006

@import "cxObject.h"
@import "IObject.h"
#include <atomic>
#include <functional>

namespace ceda
{
$enum+ class ECSpaceLockMode
{
    SharedRead,
    Exclusive,
    Transaction     // A transaction lock is an exclusive lock for which Begin/End txn operation
                    // callbacks are generated
};

$struct+ LockInfo
{
    ECSpaceLockMode mode;
};

///////////////////////////////////////////////////////////////////////////////////////////////////
// CSpace

$adt+ CSpace
{
    // Destroy the CSpace.  The CSpace must not be locked (except perhaps internally by the GC 
    // thread)
    void Destroy() const;

    // Each CSpace contains a fixed size array of pointers that can be read/written by clients
    void** GetPtrSlotArray();

    ///////////////////////////////////////////////////////////////////////////////////////////////
    // Locking

    // Blocks the calling thread until it successfully gains a lock on the CSpace.
    // Locking multiple CSpaces must be done in a specific order; see the documentation for
    // LockMultipleCSpaces().
    void Lock(ECSpaceLockMode mode);

    // Release the lock on the CSpace
    // Must be called by the thread that locked the CSpace by calling Lock()
    void Unlock();

    LockInfo BeginUnlock();
    void EndUnlock(LockInfo info);
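
    // Illustrative locking sketch (not part of the interface).  It assumes the free-function call
    // style used for GetPtrSlotArray() later in this header, and 'cs' is a hypothetical CSpace*:
    //
    //     Lock(cs, ECSpaceLockMode::SharedRead);   // blocks until the lock is granted
    //     // ... read objects owned by the CSpace ...
    //     Unlock(cs);                              // must be called on the same thread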

    ///////////////////////////////////////////////////////////////////////////////////////////////
    // Garbage collection

    void RegisterGcObject(ptr<const IObject> object);
    void RegisterNonGcObject(ptr<const IObject> object);
    ssize_t GetNumObjects() const;
    void AddGcRoot(ptr<const IObject> object);
    void RemoveGcRoot(ptr<const IObject> object);

    // Start and stop the garbage collector thread
    void StartGc();
    void StopGc();

    // Synchronously perform a GC on the CSpace.
    // Calling thread must not have a lock on the CSpace
    void SynchronousGc();

    // Get the time per GC in milliseconds
    // No lock required.  Threadsafe.
    int32 GetMilliSecsPerGc() const;

    // Set the time per GC in milliseconds
    // No lock required.  Threadsafe.
    // Note for testing:
    //    if t < 0 then garbage collections will be performed in a tight loop without a call to
    //    sleep to give up the timeslice.  This allows for aggressive garbage collection
    //    to help expose race conditions.
    void SetMilliSecsPerGc(int32 t);

    // Set/Get the eviction threshold for the given CSpace.
    // During a GC the CSpace measures the total size of all resident evictable objects (using the
    // sizes set by calls to SetEvictableObjectSizeInBytes()), and evicts objects on an LRU basis
    // until the total size is below the eviction threshold.
    ssize_t GetEvictionThreshold() const;
    void SetEvictionThreshold(ssize_t totalSizeInBytes);

    // Get the total number of completed garbage collections that have been performed on the CSpace
    // TODO: confirm whether this can be called without a lock (i.e. whether it is threadsafe)
    int64 GetTotalNumCompletedGcs() const;

    ///////////////////////////////////////////////////////////////////////////////////////////////
    // DGS

    void StopGcAndEvictAllDgsNodes();
    ssize_t TryEvictDgsNodes(ssize_t evictionQueueIndex, ssize_t maxTotalBytes);
    ssize_t GetMaxDgsTotalByteSize(ssize_t evictionQueueIndex) const;
    void SetMaxDgsTotalByteSize(ssize_t evictionQueueIndex, ssize_t maxTotalBytes);
};

///////////////////////////////////////////////////////////////////////////////////////////////
// Async tasks

@api void PostTask(const CSpace* cspace, std::function<void()> task);
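
/*
Illustrative sketch of posting a task (not part of the interface).  Exactly when and on which
thread the posted task runs is not specified here; 'cs' is a hypothetical CSpace*:

    PostTask(cs, []()
    {
        // deferred work associated with the CSpace
    });
*/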

///////////////////////////////////////////////////////////////////////////////////////////////////
// Ptr slots

/*
Every CSpace contains a fixed size array of pointers typically used by clients to navigate 
from a CSpace to some containing context, such as a PSpace
*/

const ssize_t CSPACE_NUM_RESERVED_PTR_SLOTS = 16;

// The ceda framework reserves the following ptr slots
enum ReservedPointerSlots
{
    PTR_SLOT_PSpace = 0,		// slot 0 reserved for a ptr to the associated PSpace
    PTR_SLOT_WorkingSetMgr = 1,
};

inline void SetPtrSlot(CSpace* cs, ssize_t index, void* value)
{
    cxAssert(cs);
    cxAssert(0 <= index && index < CSPACE_NUM_RESERVED_PTR_SLOTS);
    GetPtrSlotArray(cs)[index] = value;
}

inline void* GetPtrSlot(CSpace* cs, ssize_t index)
{
    cxAssert(cs);
    cxAssert(0 <= index && index < CSPACE_NUM_RESERVED_PTR_SLOTS);
    return GetPtrSlotArray(cs)[index];
}
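
/*
Illustrative ptr slot sketch (not part of the interface); 'cs' and 'pspace' are hypothetical
variables, and the cast back to the concrete PSpace type is omitted:

    SetPtrSlot(cs, PTR_SLOT_PSpace, pspace);        // associate the containing PSpace
    void* p = GetPtrSlot(cs, PTR_SLOT_PSpace);      // navigate back to it later
*/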

///////////////////////////////////////////////////////////////////////////////////////////////////
// Create CSpaces

const int32 CEDA_GC_THREAD_DONT_START = -2;
const int32 CEDA_GC_THREAD_NO_SLEEP = -1;
const int32 CEDA_DEFAULT_MILLISECS_PER_GC = 1000;

/*
Create a CSpace.  Initially the CSpace is not locked.

Each CSpace supports a garbage collector thread whose purpose is to perform GC on the objects owned 
by the CSpace.

milliSecsPerGC affects the GC thread as follows:
    
         milliSecsPerGC                      action
    ----------------------------------------------------------------------------------------------
    CEDA_GC_THREAD_DONT_START          Don't start the GC thread. It can be started later by 
                                       calling SetMilliSecsPerGc() then StartGc()
                                       
    CEDA_GC_THREAD_NO_SLEEP            Start the GC thread and allow it to run as fast as possible 
                                       without Sleep() calls. This option is only intended for 
                                       testing purposes (no sleeps increases the chances of 
                                       finding race conditions)
                                       
    >= 0                               Calls Sleep(milliSecsPerGC) between each GC.                                        
*/
$function+ CSpace* CreateCSpace(int32 milliSecsPerGC = CEDA_DEFAULT_MILLISECS_PER_GC);
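
/*
Illustrative sketch of creating CSpaces (not part of the interface).  It assumes the free-function
call style used for GetPtrSlotArray() above:

    // Default: GC thread started, performing one GC per second
    CSpace* cs1 = CreateCSpace();

    // Defer starting the GC thread, then run it aggressively (testing only)
    CSpace* cs2 = CreateCSpace(CEDA_GC_THREAD_DONT_START);
    SetMilliSecsPerGc(cs2, CEDA_GC_THREAD_NO_SLEEP);
    StartGc(cs2);

    Destroy(cs2);
    Destroy(cs1);
*/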

///////////////////////////////////////////////////////////////////////////////////////////////////
// Locking multiple CSpaces

/*
There is a general purpose way of locking any number of CSpaces.  Any number of threads can lock
multiple CSpaces at a time without deadlock.  To ensure this it is assumed that a thread calling
LockMultipleCSpaces() has not already locked any CSpaces.

In the literature this approach is called conservative two phase locking.  Before accessing a set
of CSpaces it is necessary to lock all the CSpaces up front.

In principle it should be possible to release CSpace locks while the thread runs, as soon as it
knows that it will no longer need to access the CSpace again.  However, to keep the interface
simple we currently don't support this.

The implementation avoids deadlock by using the addresses of the CSpaces to define a total
ordering on them.  By always gaining locks in this order, cycles in the wait-for graph are
impossible.
*/

// Block until all the given CSpaces are locked with the given mode.  The given array of CSpaces
// is first sorted by CSpace address to avoid deadlock.
// Only returns once all CSpaces have been successfully locked
@api void LockMultipleCSpaces(CSpace** L, ssize_t n, ECSpaceLockMode mode);

// Unlock multiple CSpaces. Must be called by the same thread that called LockMultipleCSpaces(),
// passing the same set of CSpaces.  This is a convenience function, since each CSpace can be
// unlocked individually in any order as long as those that are unlocked are not accessed further.
@api void UnlockMultipleCSpaces(CSpace** L, ssize_t n);

class MultipleCSpaceLock
{
public:
    MultipleCSpaceLock(CSpace** L, ssize_t n, ECSpaceLockMode mode = ECSpaceLockMode::Exclusive) : m_L(L), m_n(n)
    {
        LockMultipleCSpaces(L,n,mode);
    }
    ~MultipleCSpaceLock()
    {
        UnlockMultipleCSpaces(m_L,m_n);
    }

private:
    CSpace** m_L;
    ssize_t m_n;
};
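
/*
Illustrative sketch of MultipleCSpaceLock (not part of the interface); cs1 and cs2 are hypothetical
CSpace pointers:

    CSpace* spaces[] = { cs1, cs2 };
    {
        MultipleCSpaceLock lock(spaces, 2, ECSpaceLockMode::SharedRead);
        // ... read objects owned by either CSpace ...
    }   // both CSpaces are unlocked when 'lock' goes out of scope
*/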

///////////////////////////////////////////////////////////////////////////////////////////////////

// Version that allows for modes to be specified on each CSpace

struct CSpaceLockMode
{
    CSpace* space;
    ECSpaceLockMode mode;
};

// Used for sorting an array of CSpace modes in ascending order of CSpace address
inline bool operator<(const CSpaceLockMode& x, const CSpaceLockMode& y)
{
    return x.space < y.space;
}

// For debugging purposes
@api void WriteCSpaceModes(xostream& os, CSpaceLockMode* L, ssize_t n);

// Sort the given array of CSpace modes in ascending order of CSpace address
@api void SortCSpaceModes(CSpaceLockMode* L, ssize_t n);

// Block until all the given CSpaces are locked with the given modes.  Assumes that the given array
// of CSpaceLockMode entries has been sorted by first calling SortCSpaceModes()
// Only returns once all CSpaces have been successfully locked
@api void LockMultipleCSpaces(CSpaceLockMode* L, ssize_t n);

// Must be called by the same thread that called LockMultipleCSpaces(), passing the same set of
// CSpaces
@api void UnlockMultipleCSpaces(CSpaceLockMode* L, ssize_t n);

class MultipleCSpaceLock2
{
public:
    MultipleCSpaceLock2(CSpaceLockMode* L, ssize_t n) : m_L(L), m_n(n)
    {
        LockMultipleCSpaces(m_L,m_n);
    }
    ~MultipleCSpaceLock2()
    {
        UnlockMultipleCSpaces(m_L,m_n);
    }

private:
    CSpaceLockMode* m_L;
    ssize_t m_n;
};
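
/*
Illustrative sketch of per-CSpace lock modes (not part of the interface); cs1 and cs2 are
hypothetical CSpace pointers:

    CSpaceLockMode modes[] =
    {
        { cs1, ECSpaceLockMode::SharedRead },
        { cs2, ECSpaceLockMode::Exclusive }
    };
    SortCSpaceModes(modes, 2);              // required before locking
    {
        MultipleCSpaceLock2 lock(modes, 2);
        // ... access each CSpace according to its lock mode ...
    }
*/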

///////////////////////////////////////////////////////////////////////////////////////////////////

$function+ CSpace* GetAssociatedCSpace(ptr<const IObject> object);

$function+ inline bool ObjectsBelongToDifferentCSpaces(ptr<const IObject> obj1, ptr<const IObject> obj2)
{
    if (CSpace* cs1 = GetAssociatedCSpace(obj1))
    {
        if (CSpace* cs2 = GetAssociatedCSpace(obj2))
        {
            return cs1 != cs2;
        }
    }
    return false;
}

///////////////////////////////////////////////////////////////////////////////////////////////////
// Transfer

// Must only be called when there is an exclusive lock on both the given src and dst CSpaces.
// Transfers all objects from the src CSpace to the dst CSpace.
// The src CSpace becomes empty after this call.  This includes its set of GC roots.
$function+ void TransferCSpace(CSpace* src, CSpace* dst);
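
/*
Illustrative sketch of a transfer (not part of the interface); src and dst are hypothetical CSpace
pointers:

    CSpace* spaces[] = { src, dst };
    MultipleCSpaceLock lock(spaces, 2, ECSpaceLockMode::Exclusive);   // both must be locked exclusively
    TransferCSpace(src, dst);                                         // src is empty afterwards
*/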

///////////////////////////////////////////////////////////////////////////////////////////////
// Testing whether a lock has been granted

/*
In the debug build it is very useful to assert that a thread already has a lock before it accesses 
objects in a CSpace.

To allow this a thread records the set of CSpaces it has locked in thread local storage.
*/

#ifdef CEDA_CHECK_ASSERTIONS
    // Called on the thread that locks or unlocks a CSpace.  These calls are issued automatically
    // by the locking functions, so the programmer doesn't normally need to call them directly.
    // They are only exposed to support pseudo locks.
    @api void RecordCSpaceLock(CSpace* cs);
    @api void RecordCSpaceUnlock(CSpace* cs);

    // Returns true if and only if the current thread has locked the given CSpace
    @api bool HaveRecordedLock(const CSpace* cs);
#endif //CEDA_CHECK_ASSERTIONS
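
/*
Illustrative sketch (not part of the interface): a function that requires its caller to hold a lock
on the CSpace can assert this in the debug build; AccessObjects() is a hypothetical function:

    void AccessObjects(CSpace* cs)
    {
        cxAssert(HaveRecordedLock(cs));
        // ... access objects owned by cs ...
    }
*/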

///////////////////////////////////////////////////////////////////////////////////////////////////

template<typename T>
inline bool ReadEvictablePtr(ptr<T>& cacheMember, ptr<T>& localVar)
{
    localVar = cacheMember;

    // Don't allow the compiler or CPU to reorder the read of cacheMember to after this memory fence
    std::atomic_thread_fence(std::memory_order_acquire);
    
    return localVar.m_self != 0 && localVar.m_table != 0;
}

template<typename T>
inline void SetEvictablePtr(ptr<T>& cacheMember, ptr<T>& localVar)
{
    // Typically SetEvictablePtr() is invoked after creating and initialising a new object pointed
    // to by localVar.  That object needs to be assigned to cacheMember, but only strictly after it
    // has been fully constructed and initialised.  This write barrier ensures that the compiler
    // emits all machine code for the creation and initialisation of the new object before the
    // barrier (and hence before the write to cacheMember below).
    std::atomic_thread_fence(std::memory_order_release);

    // Check consistency in the value of m_table being assigned.  This will tend to assert if an
    // application thread breaks the requirement that the cache member always be assigned a 
    // pointer to an object of a consistent concrete class.
    #ifdef CEDA_CHECK_ASSERTIONS
    const void* tbl = cacheMember.m_table;
    std::atomic_thread_fence(std::memory_order_acquire);
    if (tbl && localVar.m_table) cxAssert(tbl == localVar.m_table);
    #endif
    
    cacheMember = localVar;
}
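
/*
Illustrative sketch of the intended usage pattern (not part of the interface); m_node, Node and
LoadNode() are hypothetical names for a cache member, its type and a loader:

    ptr<Node> node;
    if (!ReadEvictablePtr(m_node, node))
    {
        // Not resident: create/load the object, then publish it.  SetEvictablePtr() issues the
        // release fence so the object is fully initialised before other threads can see it.
        node = LoadNode();
        SetEvictablePtr(m_node, node);
    }
    // ... use node ...
*/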

///////////////////////////////////////////////////////////////////////////////////////////////////

// Mark the given object as recently used (touched) for the purposes of LRU eviction
$function+ void SetTouched(ptr<const IObject> obj);

// Set the size in bytes attributed to the given evictable object when measuring the total size of
// resident evictable objects against the eviction threshold (see SetEvictionThreshold())
$function+ void SetEvictableObjectSizeInBytes(ptr<const IObject> obj, ssize_t sizeInBytes);

///////////////////////////////////////////////////////////////////////////////////////////////////

xostream& operator<<(xostream& os, CSpaceLockMode p);

} // namespace ceda