// DGNode.h
//
// Author David Barrett-Lennard
// (C)opyright Cedanet Pty Ltd 2006-2014
@import "cxObject.h"
@import "IObject.h"
@import "IObjectVisitor.h"
@import "FieldPath.h"
@import "CSpace.h"
#include "Ceda/cxUtils/Tracer.h"
#include "Ceda/cxUtils/DoubleLinkedList.h"
#include <set>
#include <map>
#include <unordered_map>
#include <memory>
#include <optional>
/*
The following implementation was prototyped (see Node2.cpp/h in Ceda/Core/Object/Dgs/src)
*/
namespace ceda
{
struct Guid;
/*
A single CSpace supports up to 8 distinct eviction queues for the DGS nodes
*/
const ssize_t DGS_NUM_RESERVED_EVICTION_QUEUES = 8;
const ssize_t DgsEvictionQueueIndex_SystemMemory = 0;
const ssize_t DgsEvictionQueueIndex_VideoCardTextures = 1;
class DGSystem;
enum EDepGraphFlags
{
// ---------------------- Static flags (once initialised don't change) in the low 16 bits
/*
type of node DF_SYNC_DEP DF_ASYNC
------------------------------------------------------
indep F F
dep T F
async F T
*/
DF_SYNC_DEP = 0x00000001, // Set for synchronous dep nodes (as distinct from either indep nodes or async nodes)
DF_ASYNC = 0x00000002, // Set for a dep node that represents an async node
// Set for $dep^ nodes (i.e. synchronous dep nodes that don't check whether their calculated value has changed)
// When these nodes become hard-dirty they synchronously mark their out-nodes as hard-dirty as well.
// This flag is not set for async nodes.
DF_PROPAGATE_HARD_DIRTINESS = 0x00000008,
// ---------------------- Dynamic flags (change over time) in the high 16 bits
// The following four flags are relevant to dep/async nodes, but not indep nodes
DF_SOFT_DIRTY = 0x00010000,
DF_HARD_DIRTY = 0x00020000,
DF_CALCULATING = 0x00040000, // set when a dependent node is being calculated in order to detect cycles
DF_CYCLE = 0x00080000, // set for nodes whose value cannot be calculated because of a cycle
// Note that the node may not itself be part of a cycle.
// Relevant to all three kinds of nodes (i.e. indep, dep and async nodes)
DF_IN_EVICTION_QUEUE = 0x00100000, // Set if and only if the node is in the eviction queue
DF_SCHEDULED_FOR_EVICTION = 0x00200000,
// Set for an async task, while the posted task is running
DF_ASYNC_RUNNING = 0x00400000,
// Used by DGAsyncIndepNode and DGAsyncNodeOnInput to determine whether read() has been called for the first time
DF_CALLED_READ = 0x00800000,
};
class DGDepNode;
struct DGConnector;
///////////////////////////////////////////////////////////////////////////////////////////////////
// CycleException
class CycleException
{
#ifdef __ANDROID__
// See https://developer.android.com/ndk/guides/common-problems
virtual void dummy_key_function();
#endif
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// DGBaseNode
/*
Base class for either a dependent node or an independent node. All nodes support a linked list
of out-nodes.
*/
// 12 bytes on 32 bit platforms
class @api DGBaseNode : public DoubleLinkedList<DGBaseNode>::NodeBase
{
cxNotCloneable(DGBaseNode)
public:
DGBaseNode(uint32 flags);
~DGBaseNode();
void PostAsyncTask(const CSpace* cspace, std::function<void()> task) const
{
ceda::PostTask(cspace, task);
}
bool InEvictionQueue() const { return Enabled(DF_IN_EVICTION_QUEUE); }
void UpdateByteSize() const;
// Move this DGS node to the front of the eviction queue, and mark it as evictable,
// overriding IsEvictable().
// Must not be called if eviction isn't appropriate (such as for an async node which
// has posted a task to a thread pool).
// Trips an assertion if DF_ASYNC_RUNNING is set
void ScheduleEviction() const;
// Forced eviction - ignores IsEvictable().
// Should only be used when the CSpace is being destroyed
void ForceEvict() const;
/*
Try to evict the node if it's in the eviction queue.
Returns true if either
- node was not in the eviction queue so there was nothing to do; or
- node was in the eviction queue and IsEvictable() returned true allowing the node to be
evicted
Returns false if node was in the eviction queue and IsEvictable() returned false. In that case
if moveToBackOfQueueIfNotEvictable is true then the node is moved to the back of the eviction queue.
This function calls OnEvict() if and only if the node was evicted.
*/
bool TryEvict(bool moveToBackOfQueueIfNotEvictable) const;
// A given node must always return the same value from EvictionQueueIndex()
virtual ssize_t EvictionQueueIndex() const { return DgsEvictionQueueIndex_SystemMemory; }
virtual ssize_t ByteSize() const = 0;
// Called when this node is being evicted
// An implementation of this method may for example cause this node to remove itself from a map
virtual void OnEvict() const {}
virtual bool IsEvictable() const = 0;
// Used for debugging purposes. Default behaviour is to display the address in memory of the
// DGBaseNode
virtual xstring Name() const;
/*
The dependency graph needs to be visitable. The DG nodes tend to be embedded in IObjects,
so the virtual function VisitContainingObject() should be implemented in order to visit the
containing IObject
*/
virtual void VisitContainingObject(IObjectVisitor& v) const = 0;
protected:
// Used by the evictor to remove all edges into or out of this node.
// Even though this node may become (hard) dirty, the OnInvalidate() callback isn't called.
// This can always be done without failure.
// If this is a dependent node, removing innodes is achieved by invalidating this node (making it hard dirty).
// Removing outnodes is achieved by invalidating the outnodes (making them hard-dirty).
virtual void EnsureNoInnodesOrOutnodes() const = 0;
void SetFlag(uint32 f) const
{
m_flags |= f;
}
void ClearFlag(uint32 f) const
{
m_flags &= ~f;
}
bool Enabled(uint32 f) const
{
return (m_flags & f) != 0;
}
void OnAccess_(DGSystem& system) const;
void AttachOutnode(const DGDepNode* node) const;
void DetachOutnode(DGConnector* c) const;
void MarkOutNodesAsSoftDirty() const;
void MarkOutNodesAsHardDirty_() const;
// This function is suspect
void MarkOutNodesAsSoftAndHardDirty() const;
bool HaveOutNodes() const { return m_firstOut != nullptr; }
protected:
mutable uint32 m_flags;
private:
mutable int32 lastByteSize_;
// Pointer to the first in a linked list of out-nodes.
mutable DGConnector* m_firstOut;
friend class DGSystem;
friend class DGDepNode;
friend struct FindDGNodes;
friend class DGEvictionQueue;
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// DGIndepNode
class @api DGIndepNode : public DGBaseNode
{
//cxNotCloneable(DGIndepNode)
public:
DGIndepNode();
DGIndepNode(const DGIndepNode& rhs);
DGIndepNode& operator=(const DGIndepNode& rhs) { return *this; }
virtual bool IsEvictable() const { return true; }
void OnChange() const;
void ReadBarrier() const;
protected:
virtual void EnsureNoInnodesOrOutnodes() const;
};
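/*
Illustrative sketch (hypothetical names, not part of this header), assuming the usual DGS barrier
discipline: the owner of the underlying state calls ReadBarrier() before reading it (so that a
dependent node currently being calculated can register an edge to this node) and OnChange() after
writing it (so that out-nodes are marked dirty).  DGBaseNode's pure virtual functions still need
to be supplied by the concrete node.

    struct CounterNode : DGIndepNode
    {
        ssize_t ByteSize() const override { return 0; }   // assumed: no separately recoverable storage
        void VisitContainingObject(IObjectVisitor&) const override {}   // assumed: owner is not an IObject
    };

    class Counter
    {
    public:
        int Get() const
        {
            m_node.ReadBarrier();   // assumed: registers a dependency edge while a dep node calculates
            return m_value;
        }
        void Set(int v)
        {
            m_value = v;
            m_node.OnChange();      // assumed: marks dependent out-nodes dirty
        }
    private:
        int m_value = 0;
        CounterNode m_node;
    };
*/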
///////////////////////////////////////////////////////////////////////////////////////////////////
// DGDepNode
// 16 bytes on 32 bit platforms
class @api DGDepNode : public DGBaseNode
{
//cxNotCloneable(DGDepNode)
public:
// Flags can be:
// DF_ASYNC pure async CPU
// DF_SYNC_DEP $dep : synchronous node that checks for change
// DF_SYNC_DEP | DF_PROPAGATE_HARD_DIRTINESS $dep^ : synchronous node that doesn't check for change
explicit DGDepNode(uint32 flags);
~DGDepNode();
DGDepNode(const DGDepNode& rhs);
DGDepNode& operator=(const DGDepNode& rhs) { return *this; }
virtual bool RecalcCache() const = 0;
/*
Called when this dependent node transitions to (soft) dirty. The handler of this method is quite
limited in what it can do in this notification. In particular, it must not invoke any read or
write barriers on any nodes of the DGS.
*/
virtual void OnInvalidate() const {}
/*
Returns true if the node has been successfully asynchronously calculated.
*/
//bool Ready() const;
// Returns true if and only if this node cannot be calculated because of a circular
// definition. This means the node is either directly involved in a cycle or depends on a
// node which is directly involved in a cycle.
bool InCycle() const { return Enabled(DF_CYCLE); }
/*
The flag enableOnInvalidate should normally be true. Invalidate() is currently called with false
in OpenGLWndForView::DrawScene() in order to avoid infinite recursion.
This is suspicious. Is there a better way?
agent::OnInvalidate() --> BaseWnd::Invalidate()
--> Invalidate HWND
--> Post WM_PAINT
dequeue WM_PAINT --> OnPaint()
DrawScene()
agent.Invalidate(false); // ensure agent is dirty
// without infinite loop
agent()
*/
void Invalidate(bool enableOnInvalidate = true) const;
void ReadBarrier_(bool asIndep, bool asDep) const;
void ReadBarrierIndep() const
{
cxAssert(Enabled(DF_ASYNC));
return ReadBarrier_(true,false);
}
void ReadBarrierDep() const
{
cxAssert(Enabled(DF_ASYNC));
return ReadBarrier_(false,true);
}
void ReadBarrier() const
{
cxAssert(!Enabled(DF_ASYNC));
return ReadBarrier_(true,true);
}
bool HaveInNodes() const { return m_firstIn != nullptr; }
protected:
virtual void EnsureNoInnodesOrOutnodes() const;
private:
// Mark this node as hard-dirty, i.e. set DF_HARD_DIRTY. If this node doesn't check
// for changes in its cache then propagate hard-dirtiness into the outnodes.
bool MarkAsHardDirty() const;
// Sets DF_SOFT_DIRTY on this node and recursively on each out-node of this node. The recursion
// stops at nodes that already have the DF_SOFT_DIRTY flag set.
void MarkAsSoftDirty() const;
private:
mutable DGConnector* m_firstIn;
friend class DGBaseNode;
friend struct FindDGNodes;
};
///////////////////////////////////////////////////////////////////////////////////////////////////
@api bool CalculatingDgsNode();
/*
$cache functions with input parameters have a map from input to a dependent DGS node. Forced eviction
of the containing object requires eviction of the map elements. The following template function helps
do this:
*/
template <typename K,typename T,typename P,typename A>
bool TryEvict(std::map<K,T,P,A>& m)
{
while(!m.empty())
{
auto& node = m.begin()->second;
if (node.InEvictionQueue())
{
if (!node.TryEvict(false))
{
Tracer() << "DGS: failed to evict node " << node.Name() << '\n';
return false;
}
}
else
{
Tracer() << "DGS: ??? erasing node " << node.Name() << " in map which isn't in an eviction queue\n";
m.erase(m.begin());
}
}
return true;
}
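/*
Illustrative usage (hypothetical names, not part of this header): a containing object that keeps
such a map from input key to dependent node might forward a forced-eviction request to the
template above.

    bool MyObject::TryEvictCacheNodes()
    {
        // m_cacheNodes is a std::map<Key, NodeType>; returns false if any node refused eviction.
        return TryEvict(m_cacheNodes);
    }
*/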
template <typename T>
bool CacheValueIsEvictable(const T&)
{
// By default we assume cached values in maps are evictable.
return true;
}
template <typename T>
void OnEvictCacheValue(const T&)
{
}
///////////////////////////////////////////////////////////////////////////////////////////////////
// ClearCacheValue
template <typename T>
void ClearCacheValue(T&)
{
}
template <typename U, typename V>
void ClearCacheValue(std::pair<U,V>& p)
{
ClearCacheValue(p.first);
ClearCacheValue(p.second);
}
template <typename T, size_t N>
void ClearCacheValue(std::array<T,N>& a)
{
for (auto& e : a)
{
ClearCacheValue(e);
}
}
template <typename T>
void ClearCacheValue(const DynArray<T>& a)
{
a.clear();
}
template <typename T>
void ClearCacheValue(std::basic_string<T>& s)
{
std::basic_string<T>().swap(s);
}
template <typename T,typename A>
void ClearCacheValue(std::vector<T,A>& s)
{
std::vector<T,A>().swap(s);
}
template <typename T,typename A>
void ClearCacheValue(std::deque<T,A>& s)
{
std::deque<T,A>().swap(s);
}
template <typename T,typename A>
void ClearCacheValue(std::list<T,A>& s)
{
std::list<T,A>().swap(s);
}
template <typename K, typename P, typename A>
void ClearCacheValue(std::set<K,P,A>& s)
{
s.clear();
}
template <typename K, typename P, typename A>
void ClearCacheValue(std::multiset<K,P,A>& s)
{
s.clear();
}
template <typename K,typename T,typename P,typename A>
void ClearCacheValue(std::map<K,T,P,A>& s)
{
s.clear();
}
template <typename K,typename T,typename P,typename A>
void ClearCacheValue(std::multimap<K,T,P,A>& s)
{
s.clear();
}
template <typename K,typename T,typename H,typename P, typename A>
void ClearCacheValue(std::unordered_map<K,T,H,P,A>& s)
{
std::unordered_map<K,T,H,P,A>().swap(s);
}
template <typename T>
void ClearCacheValue(basic_xstring<T>& s)
{
basic_xstring<T>().swap(s);
}
template <typename T>
void ClearCacheValue(xvector<T>& s)
{
xvector<T>().swap(s);
}
template <typename T, ssize_t pageSize>
void ClearCacheValue(xdeque<T,pageSize>& s)
{
xdeque<T,pageSize>().swap(s);
}
template <typename T>
void ClearCacheValue(std::shared_ptr<T>& p)
{
p.reset();
}
template <typename T>
void ClearCacheValue(std::unique_ptr<T>& p)
{
p.reset(nullptr);
}
template <typename T>
void ClearCacheValue(std::optional<T>& p)
{
p.reset();
}
template <typename T>
void ClearCacheValue(ptr<T>& v)
{
v = null;
}
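/*
ClearCacheValue is a customisation point resolved by overload.  A cached value type that owns its
own buffers can participate by providing an overload next to its definition.  Hypothetical sketch
(MyCachedValue is not part of this header):

    struct MyCachedValue
    {
        std::vector<float> samples;
        std::string name;
    };

    inline void ClearCacheValue(MyCachedValue& v)
    {
        ClearCacheValue(v.samples);   // releases the vector's buffer
        ClearCacheValue(v.name);      // releases the string's buffer
    }
*/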
///////////////////////////////////////////////////////////////////////////////////////////////////
// TypeHasAdditionalSize
// By default be safe and assume true
template <typename T> constexpr bool TypeHasAdditionalSize() { return true; }
// False for basic types
@for (T in [bool,int8,int16,int32,int64,uint8,uint16,uint32,uint64,char8,char16,float32,float64,Guid])
{
template <> constexpr bool TypeHasAdditionalSize<T>() { return false; }
}
///////////////////////////////////////////////////////////////////////////////////////////////////
// CacheValueAdditionalSize
#ifdef _DEBUG
const ssize_t HeapAllocationOverhead = 64;
#else
const ssize_t HeapAllocationOverhead = 16;
#endif
// Pointers to prev and next elements
const ssize_t StdListElementOverhead = HeapAllocationOverhead + 2*sizeof(void*);
// For a binary tree there are about as many internal nodes as there are leaf nodes
// For example with a tree with 16 leaf nodes there are 1+2+4+8 = 15 internal nodes
const ssize_t BinaryTreeInternalNodeOverheadFactor = 2;
struct DummyRedBlackTreeNode
{
void* left;
void* parent;
void* right;
char color;
char isnil;
};
const ssize_t RedBlackTreeElementOverhead = BinaryTreeInternalNodeOverheadFactor * (HeapAllocationOverhead + sizeof(DummyRedBlackTreeNode));
const ssize_t StdSetElementOverhead = RedBlackTreeElementOverhead;
const ssize_t StdMultiSetElementOverhead = RedBlackTreeElementOverhead;
const ssize_t StdMapElementOverhead = RedBlackTreeElementOverhead;
const ssize_t StdMultiMapElementOverhead = RedBlackTreeElementOverhead;
// The 'additional size' of a cached value is the size of the additional (heap-allocated) buffers
// associated with it, i.e. over and above sizeof(T).
template <typename T>
ssize_t CacheValueAdditionalSize(const T&)
{
return 0;
}
template <typename T>
ssize_t CacheValueSize(const T& x)
{
return sizeof(T) + CacheValueAdditionalSize(x);
}
template <typename U, typename V>
ssize_t CacheValueAdditionalSize(const std::pair<U,V>& p)
{
return CacheValueAdditionalSize(p.first) + CacheValueAdditionalSize(p.second);
}
template <typename T, size_t N>
ssize_t CacheValueAdditionalSize(const std::array<T,N>& a)
{
ssize_t size = 0;
for (auto& e : a)
{
size += CacheValueAdditionalSize(e);
}
return size;
}
template <typename T>
ssize_t CacheValueAdditionalSize(const DynArray<T>& a)
{
ssize_t size = HeapAllocationOverhead;
for (auto& e : a)
{
size += CacheValueAdditionalSize(e);
}
return size;
}
template <typename T>
ssize_t CacheValueAdditionalSize(const std::basic_string<T>& s)
{
return HeapAllocationOverhead + s.size() * sizeof(T);
}
template <typename T,typename A>
ssize_t CacheValueAdditionalSize(const std::vector<T,A>& s)
{
ssize_t size = HeapAllocationOverhead + s.size() * sizeof(T);
for (auto& e : s)
{
size += CacheValueAdditionalSize(e);
}
return size;
}
template <typename T,typename A>
ssize_t CacheValueAdditionalSize(const std::deque<T,A>& s)
{
ssize_t size = HeapAllocationOverhead + s.size() * sizeof(T);
for (auto& e : s)
{
size += CacheValueAdditionalSize(e);
}
return size;
}
template <typename T,typename A>
ssize_t CacheValueAdditionalSize(const std::list<T,A>& s)
{
ssize_t size = s.size() * (sizeof(T) + StdListElementOverhead);
for (auto& e : s)
{
size += CacheValueAdditionalSize(e);
}
return size;
}
template <typename K, typename P, typename A>
ssize_t CacheValueAdditionalSize(const std::set<K,P,A>& s)
{
ssize_t size = s.size() * (sizeof(K) + StdSetElementOverhead);
for (auto& e : s)
{
size += CacheValueAdditionalSize(e);
}
return size;
}
template <typename K, typename P, typename A>
ssize_t CacheValueAdditionalSize(const std::multiset<K,P,A>& s)
{
ssize_t size = s.size() * (sizeof(K) + StdMultiSetElementOverhead);
for (auto& e : s)
{
size += CacheValueAdditionalSize(e);
}
return size;
}
template <typename K,typename T,typename P,typename A>
ssize_t CacheValueAdditionalSize(const std::map<K,T,P,A>& s)
{
ssize_t size = s.size() * (sizeof(std::pair<K,T>) + StdMapElementOverhead);
for (auto& e : s)
{
size += CacheValueAdditionalSize(e);
}
return size;
}
template <typename K,typename T,typename P,typename A>
ssize_t CacheValueAdditionalSize(const std::multimap<K,T,P,A>& s)
{
ssize_t size = s.size() * (sizeof(std::pair<K,T>) + StdMultiMapElementOverhead);
for (auto& e : s)
{
size += CacheValueAdditionalSize(e);
}
return size;
}
template <typename T>
ssize_t CacheValueAdditionalSize(const basic_xstring<T>& s)
{
return HeapAllocationOverhead + s.size() * sizeof(T);
}
template <typename T>
ssize_t CacheValueAdditionalSize(const xvector<T>& s)
{
ssize_t size = HeapAllocationOverhead + s.size() * sizeof(T);
for (auto& e : s)
{
size += CacheValueAdditionalSize(e);
}
return size;
}
template <typename T, ssize_t pageSize>
ssize_t CacheValueAdditionalSize(const xdeque<T,pageSize>& s)
{
ssize_t size = HeapAllocationOverhead + s.size() * sizeof(T);
for (auto& e : s)
{
size += CacheValueAdditionalSize(e);
}
return size;
}
template <typename T>
ssize_t CacheValueAdditionalSize(const std::shared_ptr<T>& p)
{
return p ? HeapAllocationOverhead + CacheValueSize(*p) : 0;
}
template <typename T>
ssize_t CacheValueAdditionalSize(const std::unique_ptr<T>& p)
{
return p ? HeapAllocationOverhead + CacheValueSize(*p) : 0;
}
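/*
CacheValueAdditionalSize is resolved the same way.  Continuing the hypothetical MyCachedValue
sketch above, an overload reports the heap space owned by its members so that a node's ByteSize()
reflects what eviction of the cached value would recover:

    inline ssize_t CacheValueAdditionalSize(const MyCachedValue& v)
    {
        return CacheValueAdditionalSize(v.samples) + CacheValueAdditionalSize(v.name);
    }
*/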
///////////////////////////////////////////////////////////////////////////////////////////////////
// Agent
struct Agent
{
Agent(bool finished = false) : finished(finished) {}
bool finished;
};
inline xostream& operator<<(xostream& os, const Agent& a)
{
os << "agent " << a.finished;
return os;
}
inline bool CacheValueIsEvictable(const Agent& agent)
{
return agent.finished;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
// DGSyncDepNode
template <typename FinalClass, typename Self, typename Output>
class DGSyncDepNode : public DGDepNode
{
public:
typedef Output output_type;
DGSyncDepNode() : DGDepNode(DF_SYNC_DEP) {}
virtual ssize_t ByteSize() const
{
// This is the *overhead* of caching the output. It is assumed that when OnEvict() calls
// ClearCacheValue(_output), CacheValueAdditionalSize() gives the number of bytes that are recovered.
return CacheValueAdditionalSize(_output);
}
virtual void VisitContainingObject(IObjectVisitor& _v) const
{
_v << _output << static_cast<const FinalClass*>(this)->_GetFinalSelf();
}
virtual void OnEvict() const
{
OnEvictCacheValue(_output);
ClearCacheValue(_output);
}
virtual bool IsEvictable() const
{
return CacheValueIsEvictable(_output);
}
const output_type& read() const
{
ReadBarrier();
return _output;
}
output_type& RawCache() { return _output; }
const output_type& RawCache() const { return _output; }
protected:
mutable output_type _output; // Protected by a CSpace lock
};
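/*
Concrete sync dep nodes are normally generated by the $cache machinery rather than written by
hand.  The sketch below is hypothetical (MyObject, MyCacheNode and ComputeFromInputs are not part
of this header) and makes two assumptions: _GetFinalSelf() returns something the IObjectVisitor
can visit (here a ptr to the containing object), and the bool returned by RecalcCache() reports
whether the cached value actually changed.

    class MyObject;

    class MyCacheNode : public DGSyncDepNode<MyCacheNode, MyObject, double>
    {
    public:
        ptr<MyObject> _GetFinalSelf() const { return m_self; }

        bool RecalcCache() const override
        {
            double newValue = ComputeFromInputs(m_self);   // reads inputs through their read barriers
            bool changed = (newValue != _output);
            _output = newValue;
            return changed;   // assumed convention: report whether the cached value changed
        }

        mutable ptr<MyObject> m_self;
    };
*/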
///////////////////////////////////////////////////////////////////////////////////////////////////
// DGKeyedSyncDepNode
template<typename Key, typename Value>
const Key& GetKeyFromValueInPair(const Value* v)
{
typedef std::pair<Key,Value> Pair;
auto p = (Pair const*) ((octet_t const*)v - offsetof(Pair, second));
return p->first;
}
template <typename FinalClass, typename Self, typename Key, typename Output>
class DGKeyedSyncDepNode : public DGDepNode
{
public:
typedef Key key_type;
typedef Output output_type;
typedef std::map<key_type,FinalClass> map_type;
DGKeyedSyncDepNode() : DGDepNode(DF_SYNC_DEP) {}
key_type const& _GetKey() const
{
return GetKeyFromValueInPair<key_type,FinalClass>(static_cast<const FinalClass*>(this));
}
virtual ssize_t ByteSize() const
{
const ssize_t MapElementSize = sizeof(typename map_type::value_type);
return StdMapElementOverhead + MapElementSize + CacheValueAdditionalSize(_output);
}
virtual void VisitContainingObject(IObjectVisitor& _v) const
{
_v << _output << static_cast<const FinalClass*>(this)->_GetFinalSelf();
}
virtual void OnEvict() const
{
OnEvictCacheValue(_output);
auto& m = static_cast<const FinalClass*>(this)->_GetMap();
cxVerify(m.erase(_GetKey()) == 1);
}
virtual bool IsEvictable() const
{
return CacheValueIsEvictable(_output);
}
const output_type& read(Self const* self) const
{
_self = self;
ReadBarrier();
return _output;
}
output_type& RawCache() { return _output; }
const output_type& RawCache() const { return _output; }
protected:
mutable const Self* _self;
mutable output_type _output; // Protected by a CSpace lock
};
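/*
Keyed cache nodes live as the mapped values of the map_type owned by the containing object, so
that OnEvict() can erase the node (and with it the cached value) from that map via _GetMap().
The sketch below is hypothetical (MyObject, MyKeyedNode and ComputeForKey are not part of this
header); as above, such classes are normally generated rather than hand-written.

    class MyObject;

    class MyKeyedNode : public DGKeyedSyncDepNode<MyKeyedNode, MyObject, int, double>
    {
    public:
        // _GetMap() must return the (non-const) map that holds this node as a mapped value,
        // so that OnEvict() can erase *this from it.
        map_type& _GetMap() const;               // e.g. returns the containing object's map of nodes

        ptr<MyObject> _GetFinalSelf() const;     // visited by VisitContainingObject()

        bool RecalcCache() const override
        {
            _output = ComputeForKey(_self, _GetKey());   // recompute the cached value for this key
            return true;
        }
    };
*/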
} // namespace ceda