diff --git a/server/profiler/profiler.go b/server/profiler/profiler.go
index a7b4f8c79..4e09571a1 100644
--- a/server/profiler/profiler.go
+++ b/server/profiler/profiler.go
@@ -25,7 +25,7 @@ func NewProfiler() *Profiler {
 }
 
 // RegisterRoutes adds profiling endpoints to the Echo server.
-func (p *Profiler) RegisterRoutes(e *echo.Echo) {
+func (*Profiler) RegisterRoutes(e *echo.Echo) {
 	// Register pprof handlers
 	g := e.Group("/debug/pprof")
 	g.GET("", echo.WrapHandler(http.HandlerFunc(pprof.Index)))
@@ -97,7 +97,6 @@ func (p *Profiler) StartMemoryMonitor(ctx context.Context) {
 			// Force GC if memory usage is high to see if objects can be reclaimed.
 			if m.HeapAlloc > 500*1024*1024 { // 500 MB threshold
 				slog.Info("forcing garbage collection due to high memory usage")
-				runtime.GC()
 			}
 		case <-ctx.Done():
 			return
diff --git a/server/router/rss/rss.go b/server/router/rss/rss.go
index 8a4435ea1..b03d1858b 100644
--- a/server/router/rss/rss.go
+++ b/server/router/rss/rss.go
@@ -98,7 +98,7 @@ func (s *RSSService) GetUserRSS(c echo.Context) error {
 }
 
 func (s *RSSService) generateRSSFromMemoList(ctx context.Context, memoList []*store.Memo, baseURL string) (string, error) {
-	rssHeading, err := getRSSHeading(s.Store, ctx)
+	rssHeading, err := getRSSHeading(ctx, s.Store)
 	if err != nil {
 		return "", err
 	}
@@ -160,8 +160,8 @@ func getRSSItemDescription(content string) (string, error) {
 	return result, nil
 }
 
-func getRSSHeading(store *store.Store, ctx context.Context) (RSSHeading, error) {
-	settings, err := store.GetWorkspaceGeneralSetting(ctx)
+func getRSSHeading(ctx context.Context, stores *store.Store) (RSSHeading, error) {
+	settings, err := stores.GetWorkspaceGeneralSetting(ctx)
 	if err != nil {
 		return RSSHeading{}, err
 	}
diff --git a/server/runner/memopayload/runner.go b/server/runner/memopayload/runner.go
index 6dc346377..596d7e83a 100644
--- a/server/runner/memopayload/runner.go
+++ b/server/runner/memopayload/runner.go
@@ -3,7 +3,6 @@ package memopayload
 import (
 	"context"
 	"log/slog"
-	"runtime"
 	"slices"
 
 	"github.com/pkg/errors"
@@ -70,9 +69,6 @@ func (r *Runner) RunOnce(ctx context.Context) {
 
 		// Move to next batch
 		offset += len(memos)
-
-		// Force garbage collection between batches to prevent memory accumulation
-		runtime.GC()
 	}
 }
diff --git a/server/runner/s3presign/runner.go b/server/runner/s3presign/runner.go
index d1d4a3264..085d2b670 100644
--- a/server/runner/s3presign/runner.go
+++ b/server/runner/s3presign/runner.go
@@ -3,7 +3,6 @@ package s3presign
 import (
 	"context"
 	"log/slog"
-	"runtime"
 	"time"
 
 	"google.golang.org/protobuf/types/known/timestamppb"
@@ -131,8 +130,5 @@ func (r *Runner) CheckAndPresign(ctx context.Context) {
 
 		// Move to next batch
 		offset += len(resources)
-
-		// Prevent memory accumulation between batches
-		runtime.GC()
 	}
 }
diff --git a/server/server.go b/server/server.go
index 5bb33ce89..7f177b042 100644
--- a/server/server.go
+++ b/server/server.go
@@ -163,9 +163,6 @@ func (s *Server) Shutdown(ctx context.Context) {
 	// Stop the profiler
 	if s.profiler != nil {
 		slog.Info("stopping profiler")
-		// Force one last garbage collection to clean up remaining objects
-		runtime.GC()
-
 		// Log final memory stats
 		var m runtime.MemStats
 		runtime.ReadMemStats(&m)
diff --git a/store/cache/cache.go b/store/cache/cache.go
index f4674fbc7..69d9cfe57 100644
--- a/store/cache/cache.go
+++ b/store/cache/cache.go
@@ -95,7 +95,7 @@ func (c *Cache) Set(ctx context.Context, key string, value any) {
 }
 
 // SetWithTTL adds a value to the cache with a custom TTL.
-func (c *Cache) SetWithTTL(ctx context.Context, key string, value any, ttl time.Duration) {
+func (c *Cache) SetWithTTL(_ context.Context, key string, value any, ttl time.Duration) {
 	// Estimate size of the item (very rough approximation).
 	size := estimateSize(value)
@@ -120,13 +120,18 @@ func (c *Cache) SetWithTTL(ctx context.Context, key string, value any, ttl time.
 }
 
 // Get retrieves a value from the cache.
-func (c *Cache) Get(ctx context.Context, key string) (any, bool) {
+func (c *Cache) Get(_ context.Context, key string) (any, bool) {
 	value, ok := c.data.Load(key)
 	if !ok {
 		return nil, false
 	}
 
-	itm := value.(item)
+	itm, ok := value.(item)
+	if !ok {
+		// If the value is not of type item, it means it was corrupted or not set correctly.
+		c.data.Delete(key)
+		return nil, false
+	}
 	if time.Now().After(itm.expiration) {
 		c.data.Delete(key)
 		atomic.AddInt64(&c.itemCount, -1)
@@ -142,22 +147,22 @@ func (c *Cache) Get(ctx context.Context, key string) (any, bool) {
 }
 
 // Delete removes a value from the cache.
-func (c *Cache) Delete(ctx context.Context, key string) {
+func (c *Cache) Delete(_ context.Context, key string) {
 	if value, loaded := c.data.LoadAndDelete(key); loaded {
 		atomic.AddInt64(&c.itemCount, -1)
 		if c.config.OnEviction != nil {
-			itm := value.(item)
+			itm, _ := value.(item)
 			c.config.OnEviction(key, itm.value)
 		}
 	}
 }
 
 // Clear removes all values from the cache.
-func (c *Cache) Clear(ctx context.Context) {
+func (c *Cache) Clear(_ context.Context) {
 	if c.config.OnEviction != nil {
 		c.data.Range(func(key, value any) bool {
-			itm := value.(item)
+			itm, _ := value.(item)
 			c.config.OnEviction(key.(string), itm.value)
 			return true
 		})
@@ -209,7 +214,7 @@ func (c *Cache) cleanup() {
 	count := 0
 
 	c.data.Range(func(key, value any) bool {
-		itm := value.(item)
+		itm, _ := value.(item)
 		if time.Now().After(itm.expiration) {
 			c.data.Delete(key)
 			count++