// ObjSysState.h
// ObjSysState.h
//
// Author David Barrett-Lennard
// (C)opyright Cedanet Pty Ltd 2007
@import "cxObject.h"
@import "TypeTraits.h"
@import "TypeOps.h"
#include "Ceda/cxUtils/BasicTypes.h"
#include <atomic>
#include <cstddef>
#include <cstdint>
#ifdef _MSC_VER
// 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
#pragma warning(disable:4251)
#endif
namespace ceda
{
struct CSpace;
struct ReflectedClass;
// Bit positions within ObjSysState::oss_objflags. They are used through
// GetFlag/SetFlag/ClearFlag below, i.e. as masks of the form (1 << bitPos).
// NOTE(review): the precise semantics of each flag are defined by the code
// that uses this header; the names suggest GC-registration and persistent
// object (PO) dirty/deletion/mark state -- confirm against the users.
enum
{
DBP_GC_NOT_REGISTERED_IN_EXTENT,
DBP_PO_DIRTY,
DBP_PO_MARKED_DIRTY_CRC,
DBP_PO_MARKED_DURING_ASYNC_DELETION,
DBP_PO_SYNC_DELETED,
DBP_PO_MARKED,
};

// Timestamp type stored in ObjSysState::oss_lastTouchedTime (see
// Set/GetLastTouchedTime). 32-bit unsigned; units/epoch are defined by the
// caller of SetLastTouchedTime -- not visible here.
using touch_time_t = std::uint32_t;
$struct+ ObjSysState
{
    // Per-object state maintained by the object system: flag word, reference
    // count, evictable size, GC mark and last-touched time.  Flag bits are
    // the DBP_* enumerators declared above.
    ObjSysState() {}

    // Redefine copy semantics to comply with creation of a new distinct object
    // (the ObjSysState members are not copied -- a copy starts with fresh
    // flags, refcount, size, mark and touch time).
    ObjSysState(const ObjSysState& rhs) {}
    ObjSysState& operator=(const ObjSysState& rhs) { return *this; }

    // Test the flag at the given bit position of oss_objflags.
    // oss_objflags is protected by the CSpace lock (see member declaration).
    inline bool GetFlag(int bitPos) const
    {
        // 1u: unsigned shift avoids signed-overflow UB for bitPos == 31.
        return (oss_objflags & (1u << bitPos)) != 0;
    }
    // Set the flag at the given bit position.
    inline void SetFlag(int bitPos)
    {
        oss_objflags |= (1u << bitPos);
    }
    // Clear the flag at the given bit position.
    inline void ClearFlag(int bitPos)
    {
        oss_objflags &= ~(1u << bitPos);
    }
    // Set or clear the flag at the given bit position according to 'value'.
    inline void SetFlag(int bitPos, bool value)
    {
        if (value) oss_objflags |= (1u << bitPos);
        else       oss_objflags &= ~(1u << bitPos);
    }

    // Record the time this object was last touched (relaxed: the timestamp
    // is advisory and carries no synchronisation with other data).
    inline void SetLastTouchedTime(touch_time_t t)
    {
        oss_lastTouchedTime.store(t, std::memory_order_relaxed);
    }
    inline touch_time_t GetLastTouchedTime() const
    {
        return oss_lastTouchedTime.load(std::memory_order_relaxed);
    }

    // GC mark bit.  Currently a plain bool rather than a DBP_* flag; the
    // commented-out lines preserve the earlier flag-based implementation.
    inline bool IsMarked() const
    {
        return oss_marked;
        //return GetFlag(DBP_GC_WEAK_MARKED);
    }
    inline void SetMarked()
    {
        oss_marked = true;
        //SetFlag(DBP_GC_WEAK_MARKED);
    }
    inline void ClearMarked()
    {
        oss_marked = false;
        //ClearFlag(DBP_GC_WEAK_MARKED);
    }

    // Size in bytes of an evictable object; non-evictable objects report 0
    // (see oss_size declaration below).
    inline ssize_t GetEvictableObjectSize() const { return oss_size; }
    inline void SetEvictableObjectSize(ssize_t size) { oss_size = size; }

    /*
    For now to keep it simple we use full sequential consistency on the ref count, but later we should
    find the most relaxed memory model that works.
    For example reference counters are typically incremented using an equivalent of
    std::atomic::fetch_add with std::memory_order_relaxed.
    */
    inline void IncRef()
    {
        // memory_order_relaxed means there are no synchronization or ordering constraints imposed on
        // other reads or writes, only this operation's atomicity is guaranteed.
        // Therefore concurrent calls to fetch_add(1, std::memory_order_relaxed) from 2 different
        // threads will always increment the ref count by 2.
        //oss_refcount_.fetch_add(1, memory_order_relaxed);
        ++oss_refcount_;
    }
    inline void DecRef()
    {
        //oss_refcount_.fetch_sub(1, memory_order_release);
        --oss_refcount_;
    }
    inline int RefCount() const
    {
        //oss_refcount_.load(1, memory_order_acquire);
        return oss_refcount_;
    }

    std::atomic<CSpace*> oss_cspace{nullptr};   // Associated CSpace if any
    uint32 oss_objflags = 0;                    // Protected by CSpace lock

private:
    // Braced initialisation: std::atomic is non-copyable, so '= 0' style
    // copy-init is only legal from C++17 (guaranteed elision); '{0}' is
    // portable back to C++11.
    std::atomic<int> oss_refcount_{0};
    ssize_t oss_size = 0;                       // Non evictable objects have size==0
                                                // evictable objects have size>0
    bool oss_marked = false;
    std::atomic<touch_time_t> oss_lastTouchedTime{0};
};
} // namespace ceda