package dataext

import (
	"sync"
)

//
// This is an LRU (Least-Recently-Used) cache based on a doubly linked list.
// All the work we do below is to have a cache where we can easily remove the least-recently-used element
// (to ensure that the cache is never bigger than maxsize items).
//
// The cache algorithm has the following properties:
// - Memory footprint is O(n), with negligible overhead
// - The cache is thread-safe (sync.Mutex)
// - Inserts are O(1)
// - Gets are O(1)
// - Re-shuffles are O(1) (= an element is evicted on Insert because the cache was full)
//
// There are also a bunch of unit tests to ensure that the cache is always in a consistent state.
//
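// A minimal usage sketch (only the API defined in this file is used; key and value are arbitrary examples):
//
//	cache := NewLRUMap(128)
//	cache.Put("answer", 42)
//	if v, ok := cache.TryGet("answer"); ok {
//		_ = v.(int) // values come back as LRUData (interface{}) and must be type-asserted
//	}
//

// LRUData is the type of the values stored in the cache; as an empty interface it accepts any value.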
type LRUData interface{}

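// LRUMap is a fixed-size, thread-safe LRU cache that maps string keys to LRUData values.
// Despite the "lfu" prefix, the linked list is ordered by recency of use:
// lfuHead is the most recently used entry, lfuTail the least recently used one (the next to be evicted).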
type LRUMap struct {
	maxsize int
	lock    sync.Mutex

	cache map[string]*cacheNode

	lfuHead *cacheNode
	lfuTail *cacheNode
}

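// cacheNode is a single entry of the doubly linked recency list:
// parent points towards the head (more recently used), child towards the tail (less recently used).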
type cacheNode struct {
	key    string
	data   LRUData
	parent *cacheNode
	child  *cacheNode
}

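// NewLRUMap creates a cache that holds at most size entries.
// A size of 0 yields a disabled cache (Put and TryGet become no-ops); any other size must be greater than 2,
// otherwise the constructor panics. The backing map gets capacity size+1 so that Put can insert a new entry
// before evicting the surplus one.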
func NewLRUMap(size int) *LRUMap {
	if size <= 2 && size != 0 {
		panic("Size must be > 2 (or 0)")
	}

	return &LRUMap{
		maxsize: size,
		lock:    sync.Mutex{},
		cache:   make(map[string]*cacheNode, size+1),
		lfuHead: nil,
		lfuTail: nil,
	}
}

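// Put inserts value under key, or updates the value if the key already exists.
// In both cases the entry becomes the most recently used one; if the cache grows beyond maxsize,
// the least recently used entry is evicted.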
func (c *LRUMap) Put(key string, value LRUData) {
	if c.maxsize == 0 {
		return // cache disabled
	}

	c.lock.Lock()
	defer c.lock.Unlock()

	node, exists := c.cache[key]

	if exists {
		// key already in the cache: only update the recency order and the value
		c.moveNodeToTop(node)
		node.data = value
		return
	}

	// key does not exist: insert into the map and add to the top of the recency list
	node = &cacheNode{
		key:    key,
		data:   value,
		parent: nil,
		child:  c.lfuHead,
	}
	if c.lfuHead == nil && c.lfuTail == nil { // special case - previously the cache was empty (head == tail == nil)
		c.lfuHead = node
		c.lfuTail = node
	} else {
		c.lfuHead = node
		node.child.parent = node
	}
	c.cache[key] = node

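	// The insert above may have pushed the cache over its limit:
	// drop the least-recently-used entry (the list tail) and unlink it.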
	if len(c.cache) > c.maxsize { // maxsize is always > 2
		tail := c.lfuTail
		delete(c.cache, tail.key)
		c.lfuTail = tail.parent
		c.lfuTail.child = nil
		tail.parent = nil
		tail.child = nil
	}
}

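// TryGet returns the cached value for key and reports whether it was found.
// A successful lookup marks the entry as the most recently used one.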
func (c *LRUMap) TryGet(key string) (LRUData, bool) {
	if c.maxsize == 0 {
		return nil, false // cache disabled
	}

	c.lock.Lock()
	defer c.lock.Unlock()

	val, ok := c.cache[key]
	if !ok {
		return nil, false
	}
	c.moveNodeToTop(val)
	return val.data, ok
}

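// moveNodeToTop moves an existing node to the head of the recency list, marking it as the most recently used.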
func (c *LRUMap) moveNodeToTop(node *cacheNode) {
	// (must only be called while c.lock is held)

	if c.lfuHead == node { // fast case: node is already at the top
		return
	}

	// Step 1: unlink the node from its current position

	parent := node.parent
	child := node.child
	if parent != nil {
		parent.child = child
	}
	if child != nil {
		child.parent = parent
	}
	if node == c.lfuHead {
		c.lfuHead = node.child
	}
	if node == c.lfuTail {
		c.lfuTail = node.parent
	}

	// Step 2: re-insert the node at the top of the list
	node.parent = nil
	node.child = c.lfuHead
	c.lfuHead = node
	if node.child != nil {
		node.child.parent = node
	}
}

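// Size returns the number of entries currently stored in the cache.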
func (c *LRUMap) Size() int {
	c.lock.Lock()
	defer c.lock.Unlock()
	return len(c.cache)
}