[v15.0/forgejo]: chore: add modernize linter (#11949)

**Backport: !11936**

- Go ships a suite of small analyzers, collectively called modernize, that help modernize Go code by using newer language features and library functions and by catching small mistakes: https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize. A sketch of the kinds of rewrites it suggests follows this list.
- Enable this linter in golangci-lint.
- There's also [`go fix`](https://go.dev/blog/gofix), which is not yet released as a linter in golangci-lint: https://github.com/golangci/golangci-lint/pull/6385
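
For a flavor of what the analyzer flags beyond the loop rewrites in this diff, here is a minimal sketch of two more of its suggestions (illustrative code, not taken from this commit):

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	a, b := 3, 7
	// modernize suggests the built-in min and max (Go 1.21)
	// in place of a hand-written if/else.
	smaller := min(a, b)

	// ...and slices.Contains (Go 1.21) in place of a manual search loop.
	names := []string{"queue", "worker", "pool"}
	found := slices.Contains(names, "worker")

	fmt.Println(smaller, found) // 3 true
}
```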

Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/11949
Reviewed-by: Mathieu Fenniak <mfenniak@noreply.codeberg.org>
Co-authored-by: Gusted <postmaster@gusted.xyz>
Co-committed-by: Gusted <postmaster@gusted.xyz>
Authored by Gusted on 2026-04-02 16:54:46 +02:00; committed by Mathieu Fenniak
parent a32804bebe, commit 607d031069
247 changed files with 650 additions and 1001 deletions


```diff
@@ -83,7 +83,7 @@ func prepareLevelDB(cfg *BaseConfig) (conn string, db *leveldb.DB, err error) {
 		}
 		conn = cfg.ConnStr
 	}
-	for i := 0; i < 10; i++ {
+	for range 10 {
 		if db, err = nosql.GetManager().GetLevelDB(conn); err == nil {
 			break
 		}
```


```diff
@@ -49,7 +49,7 @@ func newBaseRedisGeneric(cfg *BaseConfig, unique bool, client nosql.RedisClient)
 	}
 	var err error
-	for i := 0; i < 10; i++ {
+	for range 10 {
 		err = client.Ping(graceful.GetManager().ShutdownContext()).Err()
 		if err == nil {
 			break
```


```diff
@@ -88,7 +88,7 @@ func testQueueBasic(t *testing.T, newFn func(cfg *BaseConfig) (baseQueue, error)
 	// test blocking push if queue is full
 	for i := 0; i < cfg.Length; i++ {
-		err = q.PushItem(ctx, []byte(fmt.Sprintf("item-%d", i)))
+		err = q.PushItem(ctx, fmt.Appendf(nil, "item-%d", i))
 		require.NoError(t, err)
 	}
 	ctxTimed, cancel = context.WithTimeout(ctx, 10*time.Millisecond)
```
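
`fmt.Appendf` (Go 1.19) formats directly into a byte slice, so the rewrite drops the intermediate string that `[]byte(fmt.Sprintf(...))` allocates. A standalone sketch:

```go
package main

import "fmt"

func main() {
	// was: []byte(fmt.Sprintf("item-%d", 7)), which builds a string and then copies it
	item := fmt.Appendf(nil, "item-%d", 7) // formats straight into a new byte slice
	fmt.Println(string(item)) // item-7
}
```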


```diff
@@ -5,6 +5,7 @@ package queue
 import (
 	"context"
+	"maps"
 	"sync"
 	"time"
@@ -68,9 +69,7 @@ func (m *Manager) ManagedQueues() map[int64]ManagedWorkerPoolQueue {
 	defer m.mu.Unlock()
 	queues := make(map[int64]ManagedWorkerPoolQueue, len(m.Queues))
-	for k, v := range m.Queues {
-		queues[k] = v
-	}
+	maps.Copy(queues, m.Queues)
 	return queues
 }
```
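
`maps.Copy` (Go 1.21) copies every key/value pair from the source map into the destination, overwriting existing keys, which is exactly what the replaced loop did. A standalone sketch:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	src := map[int64]string{1: "push", 2: "pop"}
	dst := make(map[int64]string, len(src))
	maps.Copy(dst, src) // equivalent to: for k, v := range src { dst[k] = v }
	fmt.Println(len(dst)) // 2
}
```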


```diff
@@ -142,11 +142,7 @@ func (q *WorkerPoolQueue[T]) basePushForShutdown(items ...T) bool {
 // doStartNewWorker starts a new worker for the queue, the worker reads from worker's channel and handles the items.
 func (q *WorkerPoolQueue[T]) doStartNewWorker(wp *workerGroup[T]) {
-	wp.wg.Add(1)
-	go func() {
-		defer wp.wg.Done()
+	wp.wg.Go(func() {
 		log.Debug("Queue %q starts new worker", q.GetName())
 		defer log.Debug("Queue %q stops idle worker", q.GetName())
@@ -187,7 +183,7 @@ func (q *WorkerPoolQueue[T]) doStartNewWorker(wp *workerGroup[T]) {
 				q.workerNumMu.Unlock()
 			}
 		}
-	}()
+	})
 }

 // doFlush flushes the queue: it tries to read all items from the queue and handles them.
```
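
`sync.WaitGroup.Go` (Go 1.25) folds the `Add(1)` / `go func()` / `defer Done()` pattern into a single call, which also makes a forgotten `Done` impossible. A standalone sketch:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := range 3 {
		wg.Go(func() { // Add(1), run f in a goroutine, Done() when it returns
			fmt.Println("worker", i) // i is per-iteration since Go 1.22, safe to capture
		})
	}
	wg.Wait()
}
```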


```diff
@@ -78,17 +78,17 @@ func TestWorkerPoolQueueUnhandled(t *testing.T) {
 	runCount := 2 // we can run these tests even hundreds times to see its stability
 	t.Run("1/1", func(t *testing.T) {
-		for i := 0; i < runCount; i++ {
+		for range runCount {
 			test(t, setting.QueueSettings{BatchLength: 1, MaxWorkers: 1})
 		}
 	})
 	t.Run("3/1", func(t *testing.T) {
-		for i := 0; i < runCount; i++ {
+		for range runCount {
 			test(t, setting.QueueSettings{BatchLength: 3, MaxWorkers: 1})
 		}
 	})
 	t.Run("4/5", func(t *testing.T) {
-		for i := 0; i < runCount; i++ {
+		for range runCount {
 			test(t, setting.QueueSettings{BatchLength: 4, MaxWorkers: 5})
 		}
 	})
@@ -97,17 +97,17 @@ func TestWorkerPoolQueueUnhandled(t *testing.T) {
 func TestWorkerPoolQueuePersistence(t *testing.T) {
 	runCount := 2 // we can run these tests even hundreds times to see its stability
 	t.Run("1/1", func(t *testing.T) {
-		for i := 0; i < runCount; i++ {
+		for range runCount {
 			testWorkerPoolQueuePersistence(t, setting.QueueSettings{BatchLength: 1, MaxWorkers: 1, Length: 100})
 		}
 	})
 	t.Run("3/1", func(t *testing.T) {
-		for i := 0; i < runCount; i++ {
+		for range runCount {
 			testWorkerPoolQueuePersistence(t, setting.QueueSettings{BatchLength: 3, MaxWorkers: 1, Length: 100})
 		}
 	})
 	t.Run("4/5", func(t *testing.T) {
-		for i := 0; i < runCount; i++ {
+		for range runCount {
 			testWorkerPoolQueuePersistence(t, setting.QueueSettings{BatchLength: 4, MaxWorkers: 5, Length: 100})
 		}
 	})
@@ -142,7 +142,7 @@ func testWorkerPoolQueuePersistence(t *testing.T, queueSetting setting.QueueSett
 	q, _ := newWorkerPoolQueueForTest("pr_patch_checker_test", queueSetting, testHandler, true)
 	stop := runWorkerPoolQueue(q)
-	for i := 0; i < testCount; i++ {
+	for i := range testCount {
 		_ = q.Push("task-" + strconv.Itoa(i))
 	}
 	close(startWhenAllReady)
@@ -187,7 +187,7 @@ func TestWorkerPoolQueueActiveWorkers(t *testing.T) {
 	q, _ := newWorkerPoolQueueForTest("test-workpoolqueue", setting.QueueSettings{Type: "channel", BatchLength: 1, MaxWorkers: 1, Length: 100}, handler, false)
 	stop := runWorkerPoolQueue(q)
-	for i := 0; i < 5; i++ {
+	for i := range 5 {
 		require.NoError(t, q.Push(i))
 	}
@@ -203,7 +203,7 @@ func TestWorkerPoolQueueActiveWorkers(t *testing.T) {
 	q, _ = newWorkerPoolQueueForTest("test-workpoolqueue", setting.QueueSettings{Type: "channel", BatchLength: 1, MaxWorkers: 3, Length: 100}, handler, false)
 	stop = runWorkerPoolQueue(q)
-	for i := 0; i < 15; i++ {
+	for i := range 15 {
 		require.NoError(t, q.Push(i))
 	}
@@ -264,12 +264,12 @@ func TestWorkerPoolQueueWorkerIdleReset(t *testing.T) {
 	stop := runWorkerPoolQueue(q)
 	const workloadSize = 12
-	for i := 0; i < workloadSize; i++ {
+	for i := range workloadSize {
 		require.NoError(t, q.Push(i))
 	}

 	workerIDs := make(map[string]struct{})
-	for i := 0; i < workloadSize; i++ {
+	for i := range workloadSize {
 		c := <-chGoroutineIDs
 		workerIDs[c] = struct{}{}
 		t.Logf("%d workers: overall=%d current=%d", i, len(workerIDs), q.GetWorkerNumber())
```
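
When the index is still used, the modern form keeps it: `for i := range n` (Go 1.22) yields i = 0 through n-1, exactly like the classic counted loop. A standalone sketch:

```go
package main

import "fmt"

func main() {
	for i := range 5 { // i takes the values 0 through 4, same as i := 0; i < 5; i++
		fmt.Println("task", i)
	}
}
```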