package dataext

import (
	"sync"
)

//
// This is an LRU (Least-Recently-Used) cache based on a doubly linked list.
// All the work we do below is to have a cache where we can easily remove the least-recently-used element
// (to ensure that the cache is never bigger than maxsize items).
//
// The cache algorithm has the following properties:
// - Memory footprint is O(n), with negligible overhead
// - The cache is multi-threading safe (sync.Mutex)
// - Inserts are O(1)
// - Gets are O(1)
// - Re-shuffles are O(1) (= an element is removed on Insert because the cache was full)
//
// There are also a bunch of unit tests to ensure that the cache is always in a consistent state.
//

type LRUMap[TData any] struct {
	maxsize int
	lock    sync.Mutex

	cache map[string]*cacheNode[TData]

	lfuHead *cacheNode[TData]
	lfuTail *cacheNode[TData]
}

type cacheNode[TData any] struct {
	key    string
	data   TData
	parent *cacheNode[TData]
	child  *cacheNode[TData]
}

func NewLRUMap[TData any](size int) *LRUMap[TData] {
	if size <= 2 && size != 0 {
		panic("Size must be > 2 (or 0)")
	}

	return &LRUMap[TData]{
		maxsize: size,
		lock:    sync.Mutex{},
		cache:   make(map[string]*cacheNode[TData], size+1),
		lfuHead: nil,
		lfuTail: nil,
	}
}

func (c *LRUMap[TData]) Put(key string, value TData) {
	if c.maxsize == 0 {
		return // cache disabled
	}

	c.lock.Lock()
	defer c.lock.Unlock()

	node, exists := c.cache[key]

	if exists {
		// key already in the cache: only update the value and move the node to the top of the recency list
		c.moveNodeToTop(node)
		node.data = value
		return
	}

	// key does not exist: insert into the map and add to the top of the recency list
	node = &cacheNode[TData]{
		key:    key,
		data:   value,
		parent: nil,
		child:  c.lfuHead,
	}
	if c.lfuHead == nil && c.lfuTail == nil {
		// special case - previously the cache was empty (head == tail == nil)
		c.lfuHead = node
		c.lfuTail = node
	} else {
		c.lfuHead = node
		node.child.parent = node
	}
	c.cache[key] = node

	if len(c.cache) > c.maxsize {
		// evict the least-recently-used element at the tail (maxsize is always > 2)
		tail := c.lfuTail

		delete(c.cache, tail.key)

		c.lfuTail = tail.parent
		c.lfuTail.child = nil

		tail.parent = nil
		tail.child = nil
	}
}

func (c *LRUMap[TData]) TryGet(key string) (TData, bool) {
	if c.maxsize == 0 {
		return *new(TData), false // cache disabled
	}

	c.lock.Lock()
	defer c.lock.Unlock()

	val, ok := c.cache[key]
	if !ok {
		return *new(TData), false
	}
	c.moveNodeToTop(val)
	return val.data, ok
}

func (c *LRUMap[TData]) moveNodeToTop(node *cacheNode[TData]) {
	// (only called in critical section!)

	if c.lfuHead == node { // fast case
		return
	}

	// Step 1: unlink the node from its current position
	parent := node.parent
	child := node.child
	if parent != nil {
		parent.child = child
	}
	if child != nil {
		child.parent = parent
	}

	if node == c.lfuHead {
		c.lfuHead = node.child
	}
	if node == c.lfuTail {
		c.lfuTail = node.parent
	}

	// Step 2: re-insert the node at the top
	node.parent = nil
	node.child = c.lfuHead
	c.lfuHead = node
	if node.child != nil {
		node.child.parent = node
	}
}

func (c *LRUMap[TData]) Size() int {
	c.lock.Lock()
	defer c.lock.Unlock()
	return len(c.cache)
}
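
// exampleLRUMapUsage is a usage sketch added for illustration; it is not part
// of the original API. The key names and the capacity are arbitrary
// assumptions made for the example.
func exampleLRUMapUsage() {
	cache := NewLRUMap[string](16) // capacity 16; a size of 0 disables caching entirely

	cache.Put("user:1", "alice")
	cache.Put("user:2", "bob")

	if v, ok := cache.TryGet("user:1"); ok {
		_ = v // "alice"; the lookup also moves the node to the top of the recency list
	}

	_, ok := cache.TryGet("user:999")
	_ = ok // false: the key was never inserted (or was already evicted)

	_ = cache.Size() // currently 2
}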