mirror of https://github.com/usememos/memos

refactor: store cache
parent: c23aebd648
commit: ad2c5f0d05
@@ -0,0 +1,121 @@
package profiler

import (
	"context"
	"fmt"
	"log/slog"
	"net/http"
	"net/http/pprof"
	"runtime"
	"time"

	"github.com/labstack/echo/v4"
)
// Profiler provides HTTP endpoints for memory profiling
type Profiler struct {
	memStatsLogInterval time.Duration
}

// NewProfiler creates a new profiler that logs memory stats once a minute
func NewProfiler() *Profiler {
	return &Profiler{
		memStatsLogInterval: 1 * time.Minute,
	}
}
// RegisterRoutes adds profiling endpoints to the Echo server
func (p *Profiler) RegisterRoutes(e *echo.Echo) {
	// Register the standard pprof handlers. pprof.Handler already returns an
	// http.Handler, so it can be passed to echo.WrapHandler directly.
	g := e.Group("/debug/pprof")
	g.GET("", echo.WrapHandler(http.HandlerFunc(pprof.Index)))
	g.GET("/cmdline", echo.WrapHandler(http.HandlerFunc(pprof.Cmdline)))
	g.GET("/profile", echo.WrapHandler(http.HandlerFunc(pprof.Profile)))
	g.POST("/symbol", echo.WrapHandler(http.HandlerFunc(pprof.Symbol)))
	g.GET("/symbol", echo.WrapHandler(http.HandlerFunc(pprof.Symbol)))
	g.GET("/trace", echo.WrapHandler(http.HandlerFunc(pprof.Trace)))
	g.GET("/allocs", echo.WrapHandler(pprof.Handler("allocs")))
	g.GET("/block", echo.WrapHandler(pprof.Handler("block")))
	g.GET("/goroutine", echo.WrapHandler(pprof.Handler("goroutine")))
	g.GET("/heap", echo.WrapHandler(pprof.Handler("heap")))
	g.GET("/mutex", echo.WrapHandler(pprof.Handler("mutex")))
	g.GET("/threadcreate", echo.WrapHandler(pprof.Handler("threadcreate")))
	// Add a custom memory stats endpoint
	g.GET("/memstats", func(c echo.Context) error {
		var m runtime.MemStats
		runtime.ReadMemStats(&m)
		return c.JSON(http.StatusOK, map[string]interface{}{
			"alloc":       m.Alloc,
			"totalAlloc":  m.TotalAlloc,
			"sys":         m.Sys,
			"numGC":       m.NumGC,
			"heapAlloc":   m.HeapAlloc,
			"heapSys":     m.HeapSys,
			"heapInuse":   m.HeapInuse,
			"heapObjects": m.HeapObjects,
		})
	})
}
// StartMemoryMonitor starts a goroutine that periodically logs memory stats
func (p *Profiler) StartMemoryMonitor(ctx context.Context) {
	go func() {
		ticker := time.NewTicker(p.memStatsLogInterval)
		defer ticker.Stop()

		// Store previous heap allocation to track growth
		var lastHeapAlloc uint64
		var lastNumGC uint32

		for {
			select {
			case <-ticker.C:
				var m runtime.MemStats
				runtime.ReadMemStats(&m)

				// Calculate heap growth since last check. Growth can be
				// negative after a collection, so format the sign explicitly
				// rather than letting a negative int64 wrap around as uint64.
				heapGrowth := int64(m.HeapAlloc) - int64(lastHeapAlloc)
				heapGrowthStr := byteCountIEC(uint64(heapGrowth))
				if heapGrowth < 0 {
					heapGrowthStr = "-" + byteCountIEC(uint64(-heapGrowth))
				}
				gcCount := m.NumGC - lastNumGC

				slog.Info("memory stats",
					"heapAlloc", byteCountIEC(m.HeapAlloc),
					"heapSys", byteCountIEC(m.HeapSys),
					"heapObjects", m.HeapObjects,
					"heapGrowth", heapGrowthStr,
					"numGoroutine", runtime.NumGoroutine(),
					"numGC", m.NumGC,
					"gcSince", gcCount,
					"nextGC", byteCountIEC(m.NextGC),
					"gcPause", time.Duration(m.PauseNs[(m.NumGC+255)%256]).String(),
				)

				// Track values for next iteration
				lastHeapAlloc = m.HeapAlloc
				lastNumGC = m.NumGC

				// Force GC if memory usage is high to see if objects can be reclaimed
				if m.HeapAlloc > 500*1024*1024 { // 500 MB threshold
					slog.Info("forcing garbage collection due to high memory usage")
					runtime.GC()
				}
			case <-ctx.Done():
				return
			}
		}
	}()
}
// byteCountIEC converts bytes to a human-readable string using IEC units,
// e.g. byteCountIEC(1536) == "1.5 KiB".
func byteCountIEC(b uint64) string {
	const unit = 1024
	if b < unit {
		return fmt.Sprintf("%d B", b)
	}
	div, exp := uint64(unit), 0
	for n := b / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.1f %ciB", float64(b)/float64(div), "KMGTPE"[exp])
}
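To make the endpoints and the monitor concrete, here is a minimal wiring sketch. It is not part of the commit; the main package, the import path, and the port are assumed for illustration.

package main

import (
	"context"

	"github.com/labstack/echo/v4"
	"github.com/usememos/memos/server/profiler" // import path assumed
)

func main() {
	e := echo.New()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	p := profiler.NewProfiler()
	p.RegisterRoutes(e)       // exposes /debug/pprof/* plus /debug/pprof/memstats
	p.StartMemoryMonitor(ctx) // logs memory stats once a minute until ctx is canceled

	e.Logger.Fatal(e.Start(":8081")) // port is illustrative
}

With the server running, go tool pprof http://localhost:8081/debug/pprof/heap captures a heap profile against these routes.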
@@ -0,0 +1,311 @@
package cache

import (
	"context"
	"sync"
	"sync/atomic"
	"time"
)
// Interface defines the operations a cache must support
type Interface interface {
	// Set adds a value to the cache with the default TTL
	Set(ctx context.Context, key string, value interface{})

	// SetWithTTL adds a value to the cache with a custom TTL
	SetWithTTL(ctx context.Context, key string, value interface{}, ttl time.Duration)

	// Get retrieves a value from the cache
	Get(ctx context.Context, key string) (interface{}, bool)

	// Delete removes a value from the cache
	Delete(ctx context.Context, key string)

	// Clear removes all values from the cache
	Clear(ctx context.Context)

	// Size returns the number of items in the cache
	Size() int64

	// Close stops all background tasks and releases resources
	Close() error
}
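As a quick illustration of the contract, a hypothetical no-op implementation (not part of the commit, assuming the same package) would satisfy it, e.g. for disabling caching in tests:

// noopCache is a hypothetical implementation that caches nothing.
type noopCache struct{}

func (noopCache) Set(ctx context.Context, key string, value interface{})                           {}
func (noopCache) SetWithTTL(ctx context.Context, key string, value interface{}, ttl time.Duration) {}
func (noopCache) Get(ctx context.Context, key string) (interface{}, bool)                          { return nil, false }
func (noopCache) Delete(ctx context.Context, key string)                                           {}
func (noopCache) Clear(ctx context.Context)                                                        {}
func (noopCache) Size() int64                                                                      { return 0 }
func (noopCache) Close() error                                                                     { return nil }

// Compile-time check that the contract is met.
var _ Interface = noopCache{}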
// item represents a cached value with metadata
type item struct {
	value      interface{}
	expiration time.Time
	size       int // Approximate size in bytes
}
// Config contains options for configuring a cache
type Config struct {
	// DefaultTTL is the default time-to-live for cache entries
	DefaultTTL time.Duration

	// CleanupInterval is how often the cache runs cleanup
	CleanupInterval time.Duration

	// MaxItems is the maximum number of items allowed in the cache
	MaxItems int

	// OnEviction is called when an item is evicted from the cache
	OnEviction func(key string, value interface{})
}
// DefaultConfig returns a default configuration for the cache
func DefaultConfig() Config {
	return Config{
		DefaultTTL:      10 * time.Minute,
		CleanupInterval: 5 * time.Minute,
		MaxItems:        1000,
		OnEviction:      nil,
	}
}
// Cache is a thread-safe in-memory cache with TTL and memory management
type Cache struct {
	data       sync.Map
	config     Config
	itemCount  int64 // Use atomic operations to track item count
	stopChan   chan struct{}
	closedChan chan struct{}
}
// New creates a new memory cache with the given configuration
func New(config Config) *Cache {
	c := &Cache{
		config:     config,
		stopChan:   make(chan struct{}),
		closedChan: make(chan struct{}),
	}

	go c.cleanupLoop()
	return c
}
// NewDefault creates a new memory cache with default configuration
func NewDefault() *Cache {
	return New(DefaultConfig())
}
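Before the method implementations, a short usage sketch. Nothing below is taken from the commit: the import path, keys, TTL, and slog logging are illustrative.

package main

import (
	"context"
	"log/slog"
	"time"

	"github.com/usememos/memos/internal/cache" // import path assumed
)

func main() {
	ctx := context.Background()

	config := cache.DefaultConfig()
	config.DefaultTTL = 30 * time.Second
	config.OnEviction = func(key string, value interface{}) {
		slog.Info("evicted", "key", key)
	}

	c := cache.New(config)
	defer c.Close() // stops the background cleanup goroutine

	c.Set(ctx, "user:101", "alice")
	if v, ok := c.Get(ctx, "user:101"); ok {
		// Values come back as interface{}, so callers assert the type.
		slog.Info("hit", "name", v.(string))
	}
}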
// Set adds a value to the cache with the default TTL
func (c *Cache) Set(ctx context.Context, key string, value interface{}) {
	c.SetWithTTL(ctx, key, value, c.config.DefaultTTL)
}
// SetWithTTL adds a value to the cache with a custom TTL
func (c *Cache) SetWithTTL(ctx context.Context, key string, value interface{}, ttl time.Duration) {
	// Estimate size of the item (very rough approximation)
	size := estimateSize(value)

	// Check if the item already exists to avoid double counting. Note that
	// this check-then-store is not atomic: two concurrent Sets of the same
	// new key can both observe "not exists" and over-count by one.
	if _, exists := c.data.Load(key); exists {
		c.data.Delete(key)
		// Don't decrement count - we'll replace it
	} else {
		// Only increment if this is a new key
		atomic.AddInt64(&c.itemCount, 1)
	}

	c.data.Store(key, item{
		value:      value,
		expiration: time.Now().Add(ttl),
		size:       size,
	})

	// If we're over the max items, clean up old items
	if c.config.MaxItems > 0 && atomic.LoadInt64(&c.itemCount) > int64(c.config.MaxItems) {
		c.cleanupOldest()
	}
}
// Get retrieves a value from the cache
func (c *Cache) Get(ctx context.Context, key string) (interface{}, bool) {
	value, ok := c.data.Load(key)
	if !ok {
		return nil, false
	}

	// Expired entries are deleted lazily on read, in addition to the
	// periodic background cleanup.
	itm := value.(item)
	if time.Now().After(itm.expiration) {
		c.data.Delete(key)
		atomic.AddInt64(&c.itemCount, -1)

		if c.config.OnEviction != nil {
			c.config.OnEviction(key, itm.value)
		}

		return nil, false
	}

	return itm.value, true
}
// Delete removes a value from the cache
func (c *Cache) Delete(ctx context.Context, key string) {
	if value, loaded := c.data.LoadAndDelete(key); loaded {
		atomic.AddInt64(&c.itemCount, -1)

		if c.config.OnEviction != nil {
			itm := value.(item)
			c.config.OnEviction(key, itm.value)
		}
	}
}
// Clear removes all values from the cache
func (c *Cache) Clear(ctx context.Context) {
	if c.config.OnEviction != nil {
		c.data.Range(func(key, value interface{}) bool {
			itm := value.(item)
			c.config.OnEviction(key.(string), itm.value)
			return true
		})
	}

	// Swap in a fresh map. Replacing the sync.Map is not atomic, so Clear
	// should not race with concurrent Set/Get on the same cache.
	c.data = sync.Map{}
	atomic.StoreInt64(&c.itemCount, 0)
}
// Size returns the number of items in the cache
func (c *Cache) Size() int64 {
	return atomic.LoadInt64(&c.itemCount)
}
// Close stops the cache cleanup goroutine. It is idempotent for sequential
// callers; two goroutines racing into Close could both reach the default
// branch, so a single owner is expected to close the cache.
func (c *Cache) Close() error {
	select {
	case <-c.stopChan:
		// Already closed
		return nil
	default:
		close(c.stopChan)
		<-c.closedChan // Wait for cleanup goroutine to exit
		return nil
	}
}
// cleanupLoop periodically cleans up expired items
func (c *Cache) cleanupLoop() {
	ticker := time.NewTicker(c.config.CleanupInterval)
	defer func() {
		ticker.Stop()
		close(c.closedChan)
	}()

	for {
		select {
		case <-ticker.C:
			c.cleanup()
		case <-c.stopChan:
			return
		}
	}
}
// cleanup removes expired items
func (c *Cache) cleanup() {
	evicted := make(map[string]interface{})
	count := 0

	c.data.Range(func(key, value interface{}) bool {
		itm := value.(item)
		if time.Now().After(itm.expiration) {
			c.data.Delete(key)
			count++

			if c.config.OnEviction != nil {
				evicted[key.(string)] = itm.value
			}
		}
		return true
	})

	if count > 0 {
		atomic.AddInt64(&c.itemCount, -int64(count))

		// Call eviction callbacks outside the loop to avoid blocking the range
		if c.config.OnEviction != nil {
			for k, v := range evicted {
				c.config.OnEviction(k, v)
			}
		}
	}
}
// cleanupOldest removes the items with the earliest expiration times (a
// proxy for insertion order when TTLs are uniform) once the cache exceeds
// MaxItems.
func (c *Cache) cleanupOldest() {
	threshold := c.config.MaxItems / 5 // Remove 20% of max items at once
	if threshold < 1 {
		threshold = 1
	}

	currentCount := atomic.LoadInt64(&c.itemCount)

	// If we're not over the limit, don't do anything
	if currentCount <= int64(c.config.MaxItems) {
		return
	}

	// Select the threshold oldest entries in a single pass, without sorting
	// the whole cache
	type keyExpPair struct {
		key        string
		value      interface{}
		expiration time.Time
	}
	candidates := make([]keyExpPair, 0, threshold)

	c.data.Range(func(key, value interface{}) bool {
		itm := value.(item)
		if len(candidates) < threshold {
			candidates = append(candidates, keyExpPair{key.(string), itm.value, itm.expiration})
			return true
		}

		// Find the newest item in candidates
		newestIdx := 0
		for i := 1; i < len(candidates); i++ {
			if candidates[i].expiration.After(candidates[newestIdx].expiration) {
				newestIdx = i
			}
		}

		// Replace it if this item is older
		if itm.expiration.Before(candidates[newestIdx].expiration) {
			candidates[newestIdx] = keyExpPair{key.(string), itm.value, itm.expiration}
		}

		return true
	})

	// Delete the oldest items
	deletedCount := 0
	for _, candidate := range candidates {
		c.data.Delete(candidate.key)
		deletedCount++

		if c.config.OnEviction != nil {
			c.config.OnEviction(candidate.key, candidate.value)
		}
	}

	// Update count
	if deletedCount > 0 {
		atomic.AddInt64(&c.itemCount, -int64(deletedCount))
	}
}
// estimateSize attempts to estimate the memory footprint of a value. The
// result is stored on each item; eviction is currently count-based
// (MaxItems), so the estimate is informational for now.
func estimateSize(value interface{}) int {
	switch v := value.(type) {
	case string:
		return len(v) + 24 // base size + string overhead
	case []byte:
		return len(v) + 24 // base size + slice overhead
	case map[string]interface{}:
		return len(v) * 64 // rough estimate
	default:
		return 64 // default conservative estimate
	}
}
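A sketch of how a store layer might sit this cache in front of a slower lookup, in the cache-aside style. The getUser helper, the loadUser callback, and the key scheme are invented for illustration; only the cache API comes from the commit.

package store

import (
	"context"
	"fmt"

	"github.com/usememos/memos/internal/cache" // import path assumed
)

// getUser is a hypothetical cache-aside helper: return a cached value when
// present, otherwise load from the source of truth and cache the result.
func getUser(ctx context.Context, c *cache.Cache, id int, loadUser func(int) (string, error)) (string, error) {
	key := fmt.Sprintf("user:%d", id)
	if v, ok := c.Get(ctx, key); ok {
		return v.(string), nil // hit: the expired-entry check happened inside Get
	}

	u, err := loadUser(id) // miss: fall through to the slower lookup
	if err != nil {
		return "", err
	}
	c.Set(ctx, key, u) // cached with the configured DefaultTTL
	return u, nil
}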
@@ -0,0 +1,209 @@
package cache

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"
)
func TestCacheBasicOperations(t *testing.T) {
	ctx := context.Background()
	config := DefaultConfig()
	config.DefaultTTL = 100 * time.Millisecond
	config.CleanupInterval = 50 * time.Millisecond
	cache := New(config)
	defer cache.Close()

	// Test Set and Get
	cache.Set(ctx, "key1", "value1")
	if val, ok := cache.Get(ctx, "key1"); !ok || val != "value1" {
		t.Errorf("Expected 'value1', got %v, exists: %v", val, ok)
	}

	// Test SetWithTTL
	cache.SetWithTTL(ctx, "key2", "value2", 200*time.Millisecond)
	if val, ok := cache.Get(ctx, "key2"); !ok || val != "value2" {
		t.Errorf("Expected 'value2', got %v, exists: %v", val, ok)
	}

	// Test Delete
	cache.Delete(ctx, "key1")
	if _, ok := cache.Get(ctx, "key1"); ok {
		t.Errorf("Key 'key1' should have been deleted")
	}

	// Test automatic expiration
	time.Sleep(150 * time.Millisecond)
	if _, ok := cache.Get(ctx, "key1"); ok {
		t.Errorf("Key 'key1' should have expired")
	}
	// key2 should still be valid (200ms TTL)
	if _, ok := cache.Get(ctx, "key2"); !ok {
		t.Errorf("Key 'key2' should still be valid")
	}

	// Wait for key2 to expire
	time.Sleep(100 * time.Millisecond)
	if _, ok := cache.Get(ctx, "key2"); ok {
		t.Errorf("Key 'key2' should have expired")
	}

	// Test Clear
	cache.Set(ctx, "key3", "value3")
	cache.Clear(ctx)
	if _, ok := cache.Get(ctx, "key3"); ok {
		t.Errorf("Cache should be empty after Clear()")
	}
}
func TestCacheEviction(t *testing.T) {
	ctx := context.Background()
	config := DefaultConfig()
	config.MaxItems = 5
	cache := New(config)
	defer cache.Close()

	// Add 5 items (max capacity)
	for i := 0; i < 5; i++ {
		key := fmt.Sprintf("key%d", i)
		cache.Set(ctx, key, i)
	}

	// Verify all 5 items are in the cache
	for i := 0; i < 5; i++ {
		key := fmt.Sprintf("key%d", i)
		if _, ok := cache.Get(ctx, key); !ok {
			t.Errorf("Key '%s' should be in the cache", key)
		}
	}

	// Add 2 more items to trigger eviction
	cache.Set(ctx, "keyA", "valueA")
	cache.Set(ctx, "keyB", "valueB")

	// Verify size is still within limits
	if cache.Size() > int64(config.MaxItems) {
		t.Errorf("Cache size %d exceeds limit %d", cache.Size(), config.MaxItems)
	}

	// Some of the original keys should have been evicted
	evictedCount := 0
	for i := 0; i < 5; i++ {
		key := fmt.Sprintf("key%d", i)
		if _, ok := cache.Get(ctx, key); !ok {
			evictedCount++
		}
	}

	if evictedCount == 0 {
		t.Errorf("No keys were evicted despite exceeding max items")
	}

	// The newer keys should still be present
	if _, ok := cache.Get(ctx, "keyA"); !ok {
		t.Errorf("Key 'keyA' should be in the cache")
	}
	if _, ok := cache.Get(ctx, "keyB"); !ok {
		t.Errorf("Key 'keyB' should be in the cache")
	}
}
func TestCacheConcurrency(t *testing.T) {
	ctx := context.Background()
	cache := NewDefault()
	defer cache.Close()

	const goroutines = 10
	const operationsPerGoroutine = 100

	var wg sync.WaitGroup
	wg.Add(goroutines)

	for i := 0; i < goroutines; i++ {
		go func(id int) {
			defer wg.Done()

			baseKey := fmt.Sprintf("worker%d-", id)

			// Set operations
			for j := 0; j < operationsPerGoroutine; j++ {
				key := fmt.Sprintf("%skey%d", baseKey, j)
				value := fmt.Sprintf("value%d-%d", id, j)
				cache.Set(ctx, key, value)
			}

			// Get operations
			for j := 0; j < operationsPerGoroutine; j++ {
				key := fmt.Sprintf("%skey%d", baseKey, j)
				val, ok := cache.Get(ctx, key)
				if !ok {
					t.Errorf("Key '%s' should exist in cache", key)
					continue
				}
				expected := fmt.Sprintf("value%d-%d", id, j)
				if val != expected {
					t.Errorf("For key '%s', expected '%s', got '%s'", key, expected, val)
				}
			}

			// Delete half the keys
			for j := 0; j < operationsPerGoroutine/2; j++ {
				key := fmt.Sprintf("%skey%d", baseKey, j)
				cache.Delete(ctx, key)
			}
		}(i)
	}

	wg.Wait()

	// Verify size and deletion
	var totalKeysExpected int64 = goroutines * operationsPerGoroutine / 2
	if cache.Size() != totalKeysExpected {
		t.Errorf("Expected cache size to be %d, got %d", totalKeysExpected, cache.Size())
	}
}
func TestEvictionCallback(t *testing.T) {
	ctx := context.Background()
	evicted := make(map[string]interface{})
	evictedMu := sync.Mutex{}

	config := DefaultConfig()
	config.DefaultTTL = 50 * time.Millisecond
	config.CleanupInterval = 25 * time.Millisecond
	config.OnEviction = func(key string, value interface{}) {
		evictedMu.Lock()
		evicted[key] = value
		evictedMu.Unlock()
	}

	cache := New(config)
	defer cache.Close()

	// Add items
	cache.Set(ctx, "key1", "value1")
	cache.Set(ctx, "key2", "value2")

	// Manually delete
	cache.Delete(ctx, "key1")

	// Verify manual deletion triggered callback
	time.Sleep(10 * time.Millisecond) // Small delay to ensure callback processed
	evictedMu.Lock()
	if evicted["key1"] != "value1" {
		t.Errorf("Eviction callback not triggered for manual deletion")
	}
	evictedMu.Unlock()

	// Wait for automatic expiration
	time.Sleep(60 * time.Millisecond)

	// Verify TTL expiration triggered callback
	evictedMu.Lock()
	if evicted["key2"] != "value2" {
		t.Errorf("Eviction callback not triggered for TTL expiration")
	}
	evictedMu.Unlock()
}
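Since the suite exercises concurrent Set/Get/Delete alongside timer-driven cleanup, running it under the race detector is the natural check, e.g. go test -race on the cache package. The assertions lean on real sleeps against 50-200ms TTLs, so heavily loaded machines can see spurious timing failures.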