-

Migrate repository

+

{{ctx.Locale.Tr "migrate.select.title"}}

{{template "repo/migrate/helper" .}}
{{range .Services}} diff --git a/templates/repo/migrate/migrating.tmpl b/templates/repo/migrate/migrating.tmpl index a3a1baf57b..6b2db26aaf 100644 --- a/templates/repo/migrate/migrating.tmpl +++ b/templates/repo/migrate/migrating.tmpl @@ -86,11 +86,11 @@ - {{$address := MirrorRemoteAddress $.Context .Repository .PullMirror.GetRemoteName}}
diff --git a/tests/integration/mirror_pull_test.go b/tests/integration/mirror_pull_test.go index 03d4bdcf92..a0de586aaa 100644 --- a/tests/integration/mirror_pull_test.go +++ b/tests/integration/mirror_pull_test.go @@ -5,9 +5,16 @@ package integration import ( + "fmt" "net/http" + "net/url" + "os" + "path" + "strings" "testing" + "time" + "forgejo.org/models/auth" "forgejo.org/models/db" repo_model "forgejo.org/models/repo" "forgejo.org/models/unittest" @@ -15,94 +22,546 @@ import ( "forgejo.org/modules/git" "forgejo.org/modules/gitrepo" "forgejo.org/modules/migration" + "forgejo.org/modules/optional" + "forgejo.org/modules/process" + "forgejo.org/modules/setting" + "forgejo.org/modules/structs" app_context "forgejo.org/services/context" + "forgejo.org/services/forms" + "forgejo.org/services/migrations" mirror_service "forgejo.org/services/mirror" release_service "forgejo.org/services/release" repo_service "forgejo.org/services/repository" + files_service "forgejo.org/services/repository/files" "forgejo.org/tests" + "github.com/PuerkitoBio/goquery" + "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestMirrorPull(t *testing.T) { - defer tests.PrepareTestEnv(t)() + t.Run("Basic", func(t *testing.T) { + defer tests.PrepareTestEnv(t)() - user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2}) - repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1}) - repoPath := repo_model.RepoPath(user.Name, repo.Name) + user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2}) + repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1}) + repoPath := repo_model.RepoPath(user.Name, repo.Name) - opts := migration.MigrateOptions{ - RepoName: "test_mirror", - Description: "Test mirror", - Private: false, - Mirror: true, - CloneAddr: repoPath, - Wiki: true, - Releases: false, - } + opts := migration.MigrateOptions{ + RepoName: "test_mirror", + Description: "Test mirror", 
+ Private: false, + Mirror: true, + CloneAddr: repoPath, + Wiki: true, + Releases: false, + } - mirrorRepo, err := repo_service.CreateRepositoryDirectly(db.DefaultContext, user, user, repo_service.CreateRepoOptions{ - Name: opts.RepoName, - Description: opts.Description, - IsPrivate: opts.Private, - IsMirror: opts.Mirror, - Status: repo_model.RepositoryBeingMigrated, + mirrorRepo, err := repo_service.CreateRepositoryDirectly(db.DefaultContext, user, user, repo_service.CreateRepoOptions{ + Name: opts.RepoName, + Description: opts.Description, + IsPrivate: opts.Private, + IsMirror: opts.Mirror, + Status: repo_model.RepositoryBeingMigrated, + }) + require.NoError(t, err) + assert.True(t, mirrorRepo.IsMirror, "expected pull-mirror repo to be marked as a mirror immediately after its creation") + + ctx := t.Context() + + mirror, err := repo_service.MigrateRepositoryGitData(ctx, user, mirrorRepo, opts, nil) + require.NoError(t, err) + + gitRepo, err := gitrepo.OpenRepository(git.DefaultContext, repo) + require.NoError(t, err) + defer gitRepo.Close() + + findOptions := repo_model.FindReleasesOptions{ + IncludeDrafts: true, + IncludeTags: true, + RepoID: mirror.ID, + } + initCount, err := db.Count[repo_model.Release](db.DefaultContext, findOptions) + require.NoError(t, err) + + require.NoError(t, release_service.CreateRelease(gitRepo, &repo_model.Release{ + RepoID: repo.ID, + Repo: repo, + PublisherID: user.ID, + Publisher: user, + TagName: "v0.2", + Target: "master", + Title: "v0.2 is released", + Note: "v0.2 is released", + IsDraft: false, + IsPrerelease: false, + IsTag: true, + }, "", []*release_service.AttachmentChange{})) + + _, err = repo_model.GetMirrorByRepoID(ctx, mirror.ID) + require.NoError(t, err) + + ok := mirror_service.SyncPullMirror(ctx, mirror.ID) + assert.True(t, ok) + + count, err := db.Count[repo_model.Release](db.DefaultContext, findOptions) + require.NoError(t, err) + assert.Equal(t, initCount+1, count) + + release, err := 
repo_model.GetRelease(db.DefaultContext, repo.ID, "v0.2") + require.NoError(t, err) + require.NoError(t, release_service.DeleteReleaseByID(ctx, repo, release, user, true)) + + ok = mirror_service.SyncPullMirror(ctx, mirror.ID) + assert.True(t, ok) + + count, err = db.Count[repo_model.Release](db.DefaultContext, findOptions) + require.NoError(t, err) + assert.Equal(t, initCount, count) }) - require.NoError(t, err) - assert.True(t, mirrorRepo.IsMirror, "expected pull-mirror repo to be marked as a mirror immediately after its creation") - ctx := t.Context() - - mirror, err := repo_service.MigrateRepositoryGitData(ctx, user, mirrorRepo, opts, nil) - require.NoError(t, err) - - gitRepo, err := gitrepo.OpenRepository(git.DefaultContext, repo) - require.NoError(t, err) - defer gitRepo.Close() - - findOptions := repo_model.FindReleasesOptions{ - IncludeDrafts: true, - IncludeTags: true, - RepoID: mirror.ID, + // How will we interact with the pull mirror during this test? + interactionMethod := []struct { + name string + useAPI bool + createPullMirror func(t *testing.T, sourceRepo *repo_model.Repository, authenticate bool) (repoName string) + verifyMirrorDetails func(t *testing.T, sourceRepo *repo_model.Repository, mirrorName string, authenticate bool) + triggerPullMirror func(t *testing.T, mirrorName string) + changePullMirrorCredentials func(t *testing.T, sourceRepo *repo_model.Repository, mirrorName string, authenticate bool) + changePullMirrorAddress func(t *testing.T, sourceRepo *repo_model.Repository, mirrorName string, authenticate bool) + }{ + { + name: "API", + useAPI: true, + createPullMirror: createPullMirrorViaAPI, + triggerPullMirror: triggerPullMirrorViaAPI, + verifyMirrorDetails: func(t *testing.T, sourceRepo *repo_model.Repository, mirrorName string, authenticate bool) { + // API provides no visibility into a repo's mirror settings right now + }, + }, + { + name: "Web", + useAPI: false, + createPullMirror: createPullMirrorViaWeb, + triggerPullMirror: 
triggerPullMirrorViaWeb, + verifyMirrorDetails: verifyPullMirrorViaWeb, + changePullMirrorCredentials: changePullMirrorCredentialsViaWeb, + changePullMirrorAddress: changePullMirrorCredentialsViaWeb, // one endpoint, so same as creds + }, } - initCount, err := db.Count[repo_model.Release](db.DefaultContext, findOptions) + + mirrorConfiguration := []struct { + name string + privateSource bool + }{ + { + name: "HTTP Without Auth", + }, + { + name: "HTTP With Auth", + privateSource: true, + }, + } + + // Not using MockVariableValue due to need to undo `migrations.Init()` + prev := setting.Migrations.AllowedDomains + setting.Migrations.AllowedDomains = "localhost" + migrations.Init() // reinitialize for changed allowList + defer func() { + setting.Migrations.AllowedDomains = prev + migrations.Init() // reinitialize for changed allowList + }() + + onApplicationRun(t, func(t *testing.T, u *url.URL) { + for _, im := range interactionMethod { + for _, mc := range mirrorConfiguration { + t.Run(fmt.Sprintf("%s/%s", im.name, mc.name), func(t *testing.T) { + defer tests.PrintCurrentTest(t)() + + user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2}) + + // Create the source repository that will be mirrored. 
+ sourceRepo, sourceRepoSha, cleanupSource := tests.CreateDeclarativeRepoWithOptions(t, user2, + tests.DeclarativeRepoOptions{ + IsPrivate: optional.Some(mc.privateSource), + Files: optional.Some([]*files_service.ChangeRepoFile{ + { + Operation: "create", + TreePath: "docs.md", + ContentReader: strings.NewReader("hello, world"), + }, + }), + }, + ) + defer cleanupSource() + require.NotEmpty(t, sourceRepoSha) + + // Create pull mirror + mirror := im.createPullMirror(t, sourceRepo, mc.privateSource) + verifyPullMirrorContents(t, mirror, sourceRepoSha) + verifyPullMirrorConfig(t, mirror, sourceRepo, mc.privateSource) + im.verifyMirrorDetails(t, sourceRepo, mirror, mc.privateSource) + + // Push a change to the source and refresh the mirror + sourceRepoSha = changePullMirrorSource(t, sourceRepo, sourceRepoSha) + im.triggerPullMirror(t, mirror) + waitForPullMirror(t, mirror, sourceRepoSha) + + // Test changing the mirror's authentication method (if available) + if mc.privateSource && im.changePullMirrorCredentials != nil { + sourceRepoSha = changePullMirrorSource(t, sourceRepo, sourceRepoSha) + im.changePullMirrorCredentials(t, sourceRepo, mirror, mc.privateSource) + verifyPullMirrorConfig(t, mirror, sourceRepo, mc.privateSource) + im.verifyMirrorDetails(t, sourceRepo, mirror, mc.privateSource) + im.triggerPullMirror(t, mirror) + waitForPullMirror(t, mirror, sourceRepoSha) + } + + // Test changing the mirror's address (if available) + if im.changePullMirrorAddress != nil { + sourceRepo = renamePullMirrorSourceRepo(t, sourceRepo) + sourceRepoSha = changePullMirrorSource(t, sourceRepo, sourceRepoSha) + im.changePullMirrorAddress(t, sourceRepo, mirror, mc.privateSource) + verifyPullMirrorConfig(t, mirror, sourceRepo, mc.privateSource) + im.verifyMirrorDetails(t, sourceRepo, mirror, mc.privateSource) + im.triggerPullMirror(t, mirror) + waitForPullMirror(t, mirror, sourceRepoSha) + } + }) + } + } + }) + + t.Run("migrate from repo config credentials", func(t *testing.T) { + 
defer tests.PrintCurrentTest(t)() + + user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2}) + + mirrorRepo, _, cleanupMirror := tests.CreateDeclarativeRepoWithOptions(t, user2, + tests.DeclarativeRepoOptions{}, + ) + defer cleanupMirror() + + // Write to the repo a config file that would have plausibly existed before EncryptedRemoteAddress was + // introduced: + repoPath := mirrorRepo.RepoPath() + err := os.WriteFile(path.Join(repoPath, "config"), []byte(` +[core] + repositoryformatversion = 0 + filemode = true + bare = true +[remote "origin"] + url = https://user:password@example.com/org/repo.git + tagOpt = --no-tags + fetch = +refs/*:refs/* + mirror = true + fetch = +refs/tags/*:refs/tags/* +`), 0o644) + require.NoError(t, err) + + // Create a Mirror record without an EncryptedRemoteAddress: + mirror := &repo_model.Mirror{ + RepoID: mirrorRepo.ID, + Interval: 8 * time.Hour, + EnablePrune: true, + } + _, err = db.GetEngine(t.Context()).Insert(mirror) + require.NoError(t, err) + require.Nil(t, mirror.EncryptedRemoteAddress) + + remoteURL, err := mirror_service.DecryptOrRecoverRemoteAddress(t.Context(), mirror) + require.NoError(t, err) + assert.Equal(t, "https://user:password@example.com/org/repo.git", remoteURL.URL.String()) + + // EncryptedRemoteAddress should now be populated from the recovery: + assert.NotNil(t, mirror.EncryptedRemoteAddress) + maybeDecryptedURL, err := mirror.DecryptRemoteAddress() + require.NoError(t, err) + has, decryptedURL := maybeDecryptedURL.Get() + require.True(t, has) + assert.Equal(t, "https://user:password@example.com/org/repo.git", decryptedURL) + + // SanitizedRemoteAddress can be fetched: + maybeSanitizedURL, err := mirror.SanitizedRemoteAddress() + require.NoError(t, err) + has, sanitizedURL := maybeSanitizedURL.Get() + require.True(t, has) + assert.Equal(t, "https://user@example.com/org/repo.git", sanitizedURL) + + // Database record is updated in the database: + refetchMirror := 
unittest.AssertExistsAndLoadBean(t, &repo_model.Mirror{RepoID: mirrorRepo.ID}) + assert.Equal(t, mirror.EncryptedRemoteAddress, refetchMirror.EncryptedRemoteAddress) + + // Config file is rewritten: + config, err := os.ReadFile(path.Join(repoPath, "config")) + require.NoError(t, err) + assert.Equal(t, ` +[core] + repositoryformatversion = 0 + filemode = true + bare = true +[remote "origin"] + url = https://user@example.com/org/repo.git + tagOpt = --no-tags + fetch = +refs/*:refs/* + mirror = true + fetch = +refs/tags/*:refs/tags/* +`, string(config)) + }) +} + +func createPullMirrorViaWeb(t *testing.T, sourceRepo *repo_model.Repository, authenticate bool) string { + session := loginUser(t, "user2") + + mirrorName := fmt.Sprintf("pullmirror-%s", sourceRepo.Name) + form := &forms.MigrateRepoForm{ + CloneAddr: sourceRepo.CloneLink().HTTPS, + Service: structs.PlainGitService, + UID: 2, + RepoName: mirrorName, + Mirror: true, + } + if authenticate { + form.AuthUsername = "user2" + form.AuthPassword = getTokenForLoggedInUser(t, session, auth.AccessTokenScopeReadRepository) + } + + resp := session.MakeRequest(t, + NewRequestWithJSON(t, "POST", "/repo/migrate", form), + http.StatusSeeOther) + location := resp.Header().Get("Location") + assert.Equal(t, fmt.Sprintf("/user2/pullmirror-%s", sourceRepo.Name), location) + + var lastBody string + if !assert.Eventuallyf(t, + func() bool { + resp := session.MakeRequest(t, + NewRequest(t, "GET", location), + http.StatusOK) + body := resp.Body.String() + lastBody = body + // Looking for the repo page to be fully populated indicating that the migration is complete: + // Check that the first commit message is present: + if !strings.Contains(body, "Initial commit") { + return false + } + // Check that the fork button is present: + if !strings.Contains(body, fmt.Sprintf("/user2/%s/fork", mirrorName)) { + return false + } + return true + }, + 15*time.Second, 1*time.Second, + "expected migration to complete and repo page to render") { + 
t.Logf("last received page body: %s", lastBody) + } + + return mirrorName +} + +func createPullMirrorViaAPI(t *testing.T, sourceRepo *repo_model.Repository, authenticate bool) string { + session := loginUser(t, "user2") + apiToken := getTokenForLoggedInUser(t, session, auth.AccessTokenScopeWriteRepository) + + mirrorName := fmt.Sprintf("pullmirror-%s", sourceRepo.Name) + form := &structs.MigrateRepoOptions{ + CloneAddr: sourceRepo.CloneLink().HTTPS, + Service: "git", + RepoOwner: "user2", + RepoName: mirrorName, + Mirror: true, + } + if authenticate { + form.AuthUsername = "user2" + form.AuthPassword = getTokenForLoggedInUser(t, session, auth.AccessTokenScopeReadRepository) + } + + resp := session.MakeRequest(t, + NewRequestWithJSON(t, "POST", "/api/v1/repos/migrate", form).AddTokenAuth(apiToken), + http.StatusCreated) + var repo structs.Repository + DecodeJSON(t, resp, &repo) + assert.NotNil(t, repo) + assert.True(t, repo.Mirror) + assert.False(t, repo.Empty) + + return mirrorName +} + +func verifyPullMirrorViaWeb(t *testing.T, sourceRepo *repo_model.Repository, mirrorName string, authenticate bool) { + session := loginUser(t, "user2") + resp := session.MakeRequest(t, + NewRequestf(t, "GET", "/user2/%s/settings", mirrorName), + http.StatusOK) + htmlDoc := NewHTMLParser(t, resp.Body) + htmlDoc.AssertAttrEqual(t, "#mirror_address", "value", sourceRepo.CloneLink().HTTPS) + if authenticate { + htmlDoc.AssertAttrEqual(t, "#mirror_username", "value", "user2") + htmlDoc.AssertAttrEqual(t, "#mirror_password", "value", "") + htmlDoc.AssertAttrEqual(t, "#mirror_password", "placeholder", "(Unchanged)") + } else { + htmlDoc.AssertAttrEqual(t, "#mirror_username", "value", "") + htmlDoc.AssertAttrEqual(t, "#mirror_password", "value", "") + htmlDoc.AssertAttrEqual(t, "#mirror_password", "placeholder", "(Unset)") + } + + resp = session.MakeRequest(t, + NewRequestf(t, "GET", "/user2/%s", mirrorName), + http.StatusOK) + htmlDoc = NewHTMLParser(t, resp.Body) + 
htmlDoc.AssertElementPredicate(t, ".fork-flag", func(selection *goquery.Selection) bool { + text := strings.TrimSpace(selection.Text()) + assert.Contains(t, text, "mirror of") + assert.Contains(t, text, sourceRepo.CloneLink().HTTPS) + return true + }) +} + +func triggerPullMirrorViaWeb(t *testing.T, mirrorName string) { + session := loginUser(t, "user2") + + resp := session.MakeRequest(t, + NewRequestWithValues(t, "POST", fmt.Sprintf("/user2/%s/settings", mirrorName), map[string]string{"action": "mirror-sync"}), + http.StatusSeeOther) + location := resp.Header().Get("Location") + assert.Equal(t, fmt.Sprintf("/user2/%s/settings", mirrorName), location) +} + +func triggerPullMirrorViaAPI(t *testing.T, mirrorName string) { + session := loginUser(t, "user2") + apiToken := getTokenForLoggedInUser(t, session, auth.AccessTokenScopeWriteRepository) + + // Trigger sync... + session.MakeRequest(t, + NewRequestf(t, "POST", "/api/v1/repos/user2/%s/mirror-sync", mirrorName).AddTokenAuth(apiToken), + http.StatusOK) +} + +func changePullMirrorCredentialsViaWeb(t *testing.T, sourceRepo *repo_model.Repository, mirrorName string, authenticate bool) { + session := loginUser(t, "user2") + + form := map[string]string{ + "action": "mirror", + "enable_prune": "on", + "interval": "8h0m0s", + "mirror_address": sourceRepo.CloneLink().HTTPS, + } + if authenticate { + form["mirror_username"] = "user2" + form["mirror_password"] = getTokenForLoggedInUser(t, session, auth.AccessTokenScopeReadRepository) + } + + resp := session.MakeRequest(t, + NewRequestWithValues(t, "POST", fmt.Sprintf("/user2/%s/settings", mirrorName), form), + http.StatusSeeOther) + location := resp.Header().Get("Location") + assert.Equal(t, fmt.Sprintf("/user2/%s/settings", mirrorName), location) +} + +func verifyPullMirrorContents(t *testing.T, mirrorName, expectedSha string) { + session := loginUser(t, "user2") + apiToken := getTokenForLoggedInUser(t, session, auth.AccessTokenScopeReadRepository) + resp := 
session.MakeRequest(t, + NewRequest(t, "GET", fmt.Sprintf("/api/v1/repos/user2/%s/commits?sha=main&limit=1", mirrorName)).AddTokenAuth(apiToken), + http.StatusOK) + var commits []*structs.Commit + DecodeJSON(t, resp, &commits) + require.Len(t, commits, 1) + assert.Equal(t, expectedSha, commits[0].SHA) +} + +func waitForPullMirror(t *testing.T, mirrorName, expectedSha string) { + session := loginUser(t, "user2") + apiToken := getTokenForLoggedInUser(t, session, auth.AccessTokenScopeReadRepository) + + var commits []*structs.Commit + if !assert.Eventually(t, + func() bool { + resp := session.MakeRequest(t, + NewRequest(t, "GET", fmt.Sprintf("/api/v1/repos/user2/%s/commits?sha=main&limit=1", mirrorName)).AddTokenAuth(apiToken), + http.StatusOK) + DecodeJSON(t, resp, &commits) + require.Len(t, commits, 1) + return commits[0].SHA == expectedSha + }, + 15*time.Second, 1*time.Second) { + t.Logf("sync was supposed to bring repo to commit %s, but observed commits = %#v", expectedSha, commits) + } +} + +func getGitConfig(t *testing.T, configFile, configPath string) string { + stdout, stderr, err := process.GetManager().Exec("getGitConfig", "git", "config", "get", "--file", configFile, configPath) + require.NoError(t, err, "fetch config %s failed: git stderr: %s", configPath, stderr) + return strings.TrimSpace(stdout) +} + +func verifyPullMirrorConfig(t *testing.T, mirrorName string, sourceRepo *repo_model.Repository, authenticate bool) { + mirrorRepo, err := repo_model.GetRepositoryByOwnerAndName(t.Context(), "user2", mirrorName) require.NoError(t, err) - require.NoError(t, release_service.CreateRelease(gitRepo, &repo_model.Release{ - RepoID: repo.ID, - Repo: repo, - PublisherID: user.ID, - Publisher: user, - TagName: "v0.2", - Target: "master", - Title: "v0.2 is released", - Note: "v0.2 is released", - IsDraft: false, - IsPrerelease: false, - IsTag: true, - }, "", []*release_service.AttachmentChange{})) + repoPath := mirrorRepo.RepoPath() + configPath := path.Join(repoPath, 
"config") - _, err = repo_model.GetMirrorByRepoID(ctx, mirror.ID) + expectedURL := sourceRepo.CloneLink().HTTPS + if authenticate { + expectedURL = strings.Replace(expectedURL, "http://", "http://user2@", 1) + } + assert.Equal(t, expectedURL, getGitConfig(t, configPath, "remote.origin.url")) + assert.Equal(t, "true", getGitConfig(t, configPath, "remote.origin.mirror")) + assert.Equal(t, "+refs/tags/*:refs/tags/*", getGitConfig(t, configPath, "remote.origin.fetch")) +} + +func changePullMirrorSource(t *testing.T, sourceRepo *repo_model.Repository, sourceRepoSha string) string { + user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2}) + resp, err := files_service.ChangeRepoFiles(git.DefaultContext, sourceRepo, user2, + &files_service.ChangeRepoFilesOptions{ + Files: []*files_service.ChangeRepoFile{ + { + Operation: "update", + TreePath: "docs.md", + ContentReader: strings.NewReader(uuid.NewString()), + }, + }, + Message: "add files", + OldBranch: "main", + NewBranch: "main", + Author: &files_service.IdentityOptions{ + Name: user2.Name, + Email: user2.Email, + }, + Committer: &files_service.IdentityOptions{ + Name: user2.Name, + Email: user2.Email, + }, + Dates: &files_service.CommitDateOptions{ + Author: time.Now(), + Committer: time.Now(), + }, + LastCommitID: sourceRepoSha, + }) require.NoError(t, err) + assert.NotEmpty(t, resp) + return resp.Commit.SHA +} - ok := mirror_service.SyncPullMirror(ctx, mirror.ID) - assert.True(t, ok) +func renamePullMirrorSourceRepo(t *testing.T, sourceRepo *repo_model.Repository) *repo_model.Repository { + session := loginUser(t, "user2") + apiToken := getTokenForLoggedInUser(t, session, auth.AccessTokenScopeWriteRepository) - count, err := db.Count[repo_model.Release](db.DefaultContext, findOptions) - require.NoError(t, err) - assert.Equal(t, initCount+1, count) + newName := uuid.NewString() + session.MakeRequest(t, + NewRequestWithJSON(t, "PATCH", fmt.Sprintf("/api/v1/repos/user2/%s", sourceRepo.Name), + 
&structs.EditRepoOption{ + Name: &newName, + }).AddTokenAuth(apiToken), + http.StatusOK) - release, err := repo_model.GetRelease(db.DefaultContext, repo.ID, "v0.2") - require.NoError(t, err) - require.NoError(t, release_service.DeleteReleaseByID(ctx, repo, release, user, true)) - - ok = mirror_service.SyncPullMirror(ctx, mirror.ID) - assert.True(t, ok) - - count, err = db.Count[repo_model.Release](db.DefaultContext, findOptions) - require.NoError(t, err) - assert.Equal(t, initCount, count) + newRepo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: sourceRepo.ID}) + assert.Equal(t, newRepo.Name, newName) + assert.NotEqual(t, newRepo.CloneLink().HTTPS, sourceRepo.CloneLink().HTTPS) // should have changed to new name + return newRepo } func TestPullMirrorRedactCredentials(t *testing.T) { From 06888ca34ab60dc994938e01f84b6b49ea3e0e0d Mon Sep 17 00:00:00 2001 From: forgejo-backport-action Date: Sat, 4 Apr 2026 14:47:05 +0200 Subject: [PATCH 081/352] [v15.0/forgejo] fix: store pull mirror creds encrypted with keying (#11984) **Backport:** https://codeberg.org/forgejo/forgejo/pulls/11909 Fixes #9629. New pull mirrors have credentials stored encrypted in the database, the same as push mirrors, rather than in the repository's `config` file. `git fetch` on the pull mirror is updated to use the credential store. Pull mirrors will have their credentials migrated to the encrypted storage in the database as they're synced or otherwise accessed via the web UI. ## Checklist The [contributor guide](https://forgejo.org/docs/next/contributor/) contains information that will be helpful to first time contributors. All work and communication must conform to Forgejo's [AI Agreement](https://codeberg.org/forgejo/governance/src/branch/main/AIAgreement.md). There also are a few [conditions for merging Pull Requests in Forgejo repositories](https://codeberg.org/forgejo/governance/src/branch/main/PullRequestsAgreement.md). 
You are also welcome to join the [Forgejo development chatroom](https://matrix.to/#/#forgejo-development:matrix.org). ### Tests for Go changes - I added test coverage for Go changes... - [ ] in their respective `*_test.go` for unit tests. - [x] in the `tests/integration` directory if it involves interactions with a live Forgejo server. - I ran... - [x] `make pr-go` before pushing ### Documentation - [ ] I created a pull request [to the documentation](https://codeberg.org/forgejo/docs) to explain to Forgejo users how to use this change. - [x] I did not document these changes and I do not expect someone else to do it. ### Release notes - [x] This change will be noticed by a Forgejo user or admin (feature, bug fix, performance, etc.). I suggest to include a release note for this change. - [ ] This change is not visible to a Forgejo user or admin (refactor, dependency upgrade, etc.). I think there is no need to add a release note for this change. Co-authored-by: Mathieu Fenniak Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/11984 Reviewed-by: Mathieu Fenniak Co-authored-by: forgejo-backport-action Co-committed-by: forgejo-backport-action --- .../v15c_add_mirror_remoteaddressauth.go | 30 + models/repo/mirror.go | 79 ++- modules/git/command.go | 49 ++ modules/git/repo.go | 42 +- modules/keying/keying.go | 2 + modules/templates/util_misc.go | 15 +- routers/web/repo/setting/setting.go | 28 +- services/mirror/mirror_pull.go | 110 ++-- services/repository/migrate.go | 7 +- templates/repo/header.tmpl | 3 +- templates/repo/settings/options.tmpl | 4 +- tests/integration/mirror_pull_test.go | 587 ++++++++++++++++-- 12 files changed, 774 insertions(+), 182 deletions(-) create mode 100644 models/forgejo_migrations/v15c_add_mirror_remoteaddressauth.go diff --git a/models/forgejo_migrations/v15c_add_mirror_remoteaddressauth.go b/models/forgejo_migrations/v15c_add_mirror_remoteaddressauth.go new file mode 100644 index 0000000000..b2f30235ad --- /dev/null +++ 
b/models/forgejo_migrations/v15c_add_mirror_remoteaddressauth.go @@ -0,0 +1,30 @@ +// Copyright 2026 The Forgejo Authors. All rights reserved. +// SPDX-License-Identifier: GPL-3.0-or-later + +package forgejo_migrations + +import ( + "xorm.io/xorm" +) + +func init() { + registerMigration(&Migration{ + Description: "replace remote_address with encrypted_remote_address in table mirror", + Upgrade: addMirrorRemoteAddressAuth, + }) +} + +func addMirrorRemoteAddressAuth(x *xorm.Engine) error { + type Mirror struct { + EncryptedRemoteAddress []byte `xorm:"BLOB NULL"` + } + if _, err := x.SyncWithOptions(xorm.SyncOptions{IgnoreDropIndices: true}, new(Mirror)); err != nil { + return err + } + // No data migration is necessary or desired. `remote_address` contains sanitized URLs which don't have + // credentials, so they can't be migrated to `encrypted_remote_address`. Instead, as this data is accessed, + // `DecryptOrRecoverRemoteAddress` will recover the fully credentialed contents of the remote address from the git + // repo's `origin` remote address. + _, err := x.Exec("ALTER TABLE `mirror` DROP COLUMN `remote_address`") + return err +} diff --git a/models/repo/mirror.go b/models/repo/mirror.go index 1fe9afd8e9..c64f9dc734 100644 --- a/models/repo/mirror.go +++ b/models/repo/mirror.go @@ -6,10 +6,14 @@ package repo import ( "context" + "errors" + "net/url" "time" "forgejo.org/models/db" + "forgejo.org/modules/keying" "forgejo.org/modules/log" + "forgejo.org/modules/optional" "forgejo.org/modules/timeutil" "forgejo.org/modules/util" ) @@ -31,7 +35,9 @@ type Mirror struct { LFS bool `xorm:"lfs_enabled NOT NULL DEFAULT false"` LFSEndpoint string `xorm:"lfs_endpoint TEXT"` - RemoteAddress string `xorm:"VARCHAR(2048)"` + // Encrypted remote address w/ credentials; can be NULL if a mirror has not performed a sync since this field was + // introduced, in which case the remote address exists only in the repo's configured git remote on disk. 
+ EncryptedRemoteAddress []byte `xorm:"BLOB NULL"` } func init() { @@ -73,6 +79,71 @@ func (m *Mirror) ScheduleNextUpdate() { } } +// InsertMirror inserts a mirror to database. RemoteAddress must be provided so that it can be encrypted and stored +// during the insert process. +func (m *Mirror) InsertWithAddress(ctx context.Context, addr string) error { + return db.WithTx(ctx, func(ctx context.Context) error { + if _, err := db.GetEngine(ctx).Insert(m); err != nil { + return err + } + return m.UpdateRemoteAddress(ctx, addr) + }) +} + +// Stores a credential-free version of the address in `RemoteAddress`, encrypts the original into `RemoteAddressAuth`, +// and stores both in the database. The ID of the mirror must be known, so this must be done after the mirror is +// inserted. +func (m *Mirror) UpdateRemoteAddress(ctx context.Context, addr string) error { + if m.ID == 0 { + return errors.New("must persist mirror to database before using UpdateRemoteAddress") + } + + m.EncryptedRemoteAddress = keying.PullMirror.Encrypt( + []byte(addr), + keying.ColumnAndID("remote_address_auth", m.ID), + ) + _, err := db.GetEngine(ctx).ID(m.ID).Cols("encrypted_remote_address").Update(m) + return err +} + +// Retrieves the encrypted remote address and decrypts it. Note that this field is expected to be absent for mirrors +// created before the introduction of EncryptedRemoteAddress, in which case credentials are not known to Forgejo +// directly (but may be on-disk in the repository's config file) and None will be returned. +func (m *Mirror) DecryptRemoteAddress() (optional.Option[string], error) { + if m.EncryptedRemoteAddress == nil { + return optional.None[string](), nil + } + + contents, err := keying.PullMirror.Decrypt(m.EncryptedRemoteAddress, keying.ColumnAndID("remote_address_auth", m.ID)) + if err != nil { + return optional.None[string](), err + } + return optional.Some(string(contents)), nil +} + +// Retrieves the remote address but sanitizes it of sensitive credentials. 
May be absent for mirrors created before the +// introduction of EncryptedRemoteAddress. +func (m *Mirror) SanitizedRemoteAddress() (optional.Option[string], error) { + maybeAddr, err := m.DecryptRemoteAddress() + if err != nil { + return optional.None[string](), err + } else if has, addr := maybeAddr.Get(); has { + parsedURL, err := url.Parse(addr) + if err != nil { + return optional.None[string](), err + } + + // Remove the password if present. Retain the username for consistency with `AddAuthCredentialHelperForRemote` + // which retains the username for the `git clone` command line, which ends up as the remote URL in the mirror's + // git config. + if parsedURL.User != nil { + parsedURL.User = url.User(parsedURL.User.Username()) + } + return optional.Some(parsedURL.String()), nil + } + return optional.None[string](), nil +} + // GetMirrorByRepoID returns mirror information of a repository. func GetMirrorByRepoID(ctx context.Context, repoID int64) (*Mirror, error) { m := &Mirror{RepoID: repoID} @@ -115,9 +186,3 @@ func MirrorsIterate(ctx context.Context, limit int, f func(idx int, bean any) er } return sess.Iterate(new(Mirror), f) } - -// InsertMirror inserts a mirror to database -func InsertMirror(ctx context.Context, mirror *Mirror) error { - _, err := db.GetEngine(ctx).Insert(mirror) - return err -} diff --git a/modules/git/command.go b/modules/git/command.go index bf1d624dbf..4ab081418b 100644 --- a/modules/git/command.go +++ b/modules/git/command.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "io" + "net/url" "os" "os/exec" "runtime/trace" @@ -446,6 +447,54 @@ func (c *Command) RunStdBytes(opts *RunOpts) (stdout, stderr []byte, runErr RunS return stdoutBuf.Bytes(), stderr, nil } +// If `remoteURL` is a URL with a password in it, add parameters to the git command that will read that password from a +// credential store file, and return the URL that should be used in the command instead of the original, and a cleanup +// function to call to remove the credential 
file. If `remoteURL` doesn't have a password, then it is returned as-is. +// This function must be invoked on the the git command before the git sub-command -- eg. before the `clone` or `fetch` +// parameter is added to the command's args. +func (c *Command) AddAuthCredentialHelperForRemote(remoteURL string) (commandURL string, cleanup func(), err error) { + parsedFromURL, _ := url.Parse(remoteURL) + + // If the clone URL has credentials, build a credential file for usage by git-credential-store + // to prevent credential leak in the process list. + // https://git-scm.com/docs/git-credential-store#_storage_format + // credential.helper adjustment must be set before the git subcommand + if strings.Contains(remoteURL, "://") && strings.Contains(remoteURL, "@") && parsedFromURL != nil { + credentialsFile, err := os.CreateTemp("", "forgejo-clone-credentials-") + if err != nil { + return "", nil, err + } + credentialsPath := credentialsFile.Name() + + cleanup := func() { + _ = credentialsFile.Close() + if err := util.Remove(credentialsPath); err != nil { + log.Warn("Unable to remove temporary file %q: %v", credentialsPath, err) + } + } + _, err = credentialsFile.Write([]byte(parsedFromURL.String())) + if err != nil { + cleanup() + return "", nil, err + } + err = credentialsFile.Close() + if err != nil { + cleanup() + return "", nil, err + } + + c.AddArguments("-c").AddDynamicArguments("credential.helper=store --file=" + credentialsPath) + + // remove the password from the URL argument + parsedFromURL.User = url.User(parsedFromURL.User.Username()) + commandURL = parsedFromURL.String() + + return commandURL, cleanup, nil + } + + return remoteURL, func() {}, nil +} + // AllowLFSFiltersArgs return globalCommandArgs with lfs filter, it should only be used for tests func AllowLFSFiltersArgs() TrustedCmdArgs { // Now here we should explicitly allow lfs filters to run diff --git a/modules/git/repo.go b/modules/git/repo.go index 6bd03f8e3c..8f9b95f1f2 100644 --- 
a/modules/git/repo.go +++ b/modules/git/repo.go @@ -18,7 +18,6 @@ import ( "strings" "time" - "forgejo.org/modules/log" "forgejo.org/modules/proxy" "forgejo.org/modules/setting" "forgejo.org/modules/util" @@ -141,44 +140,13 @@ func CloneWithArgs(ctx context.Context, args TrustedCmdArgs, from, to string, op envs = proxy.EnvWithProxy(parsedFromURL) } - fromURL := from - sanitizedFrom := from + sanitizedFrom := util.SanitizeCredentialURLs(from) - // If the clone URL has credentials, build a credential file for usage by git-credential-store - // to prevent credential leak in the process list. - // https://git-scm.com/docs/git-credential-store#_storage_format - // credential.helper adjustment must be set before the git subcommand - if strings.Contains(from, "://") && strings.Contains(from, "@") { - sanitizedFrom = util.SanitizeCredentialURLs(from) - if parsedFromURL != nil { - credentialsFile, err := os.CreateTemp("", "forgejo-clone-credentials-") - if err != nil { - return err - } - credentialsPath := credentialsFile.Name() - - defer func() { - _ = credentialsFile.Close() - if err := util.Remove(credentialsPath); err != nil { - log.Warn("Unable to remove temporary file %q: %v", credentialsPath, err) - } - }() - _, err = credentialsFile.Write([]byte(parsedFromURL.String())) - if err != nil { - return err - } - err = credentialsFile.Close() - if err != nil { - return err - } - - cmd.AddArguments("-c").AddDynamicArguments("credential.helper=store --file=" + credentialsPath) - - // remove the password from the URL argument - parsedFromURL.User = url.User(parsedFromURL.User.Username()) - fromURL = parsedFromURL.String() - } + fromURL, cleanup, err := cmd.AddAuthCredentialHelperForRemote(from) + if err != nil { + return err } + defer cleanup() cmd.AddArguments("clone") diff --git a/modules/keying/keying.go b/modules/keying/keying.go index 751fd77521..14fbaaca98 100644 --- a/modules/keying/keying.go +++ b/modules/keying/keying.go @@ -41,6 +41,8 @@ var ( MigrateTask = 
deriveKey("migrate_repo_task") // Used for the `webhook` table. Webhook = deriveKey("webhook") + // Used for the `mirror` table. + PullMirror = deriveKey("pullmirror") ) var ( diff --git a/modules/templates/util_misc.go b/modules/templates/util_misc.go index 60f918be47..8e1ef71f5d 100644 --- a/modules/templates/util_misc.go +++ b/modules/templates/util_misc.go @@ -17,12 +17,11 @@ import ( asymkey_model "forgejo.org/models/asymkey" repo_model "forgejo.org/models/repo" user_model "forgejo.org/models/user" - "forgejo.org/modules/git" - giturl "forgejo.org/modules/git/url" "forgejo.org/modules/json" "forgejo.org/modules/log" "forgejo.org/modules/repository" "forgejo.org/modules/svg" + mirror_service "forgejo.org/services/mirror" "github.com/editorconfig/editorconfig-core-go/v2" ) @@ -164,17 +163,11 @@ type remoteAddress struct { Password string } -func mirrorRemoteAddress(ctx context.Context, m *repo_model.Repository, remoteName string) remoteAddress { +func mirrorRemoteAddress(ctx context.Context, mirror *repo_model.Mirror) remoteAddress { ret := remoteAddress{} - remoteURL, err := git.GetRemoteAddress(ctx, m.RepoPath(), remoteName) + u, err := mirror_service.DecryptOrRecoverRemoteAddress(ctx, mirror) if err != nil { - log.Error("GetRemoteURL %v", err) - return ret - } - - u, err := giturl.Parse(remoteURL) - if err != nil { - log.Error("giturl.Parse %v", err) + log.Error("DecryptOrRecoverRemoteAddress %v", err) return ret } diff --git a/routers/web/repo/setting/setting.go b/routers/web/repo/setting/setting.go index f7d0348634..b001cf961e 100644 --- a/routers/web/repo/setting/setting.go +++ b/routers/web/repo/setting/setting.go @@ -462,12 +462,12 @@ func SettingsPost(ctx *context.Context) { return } - u, err := git.GetRemoteURL(ctx, ctx.Repo.Repository.RepoPath(), pullMirror.GetRemoteName()) + u, err := mirror_service.DecryptOrRecoverRemoteAddress(ctx, pullMirror) if err != nil { - ctx.Data["Err_MirrorAddress"] = true - handleSettingRemoteAddrError(ctx, err, form) + 
ctx.ServerError("DecryptOrRecoverRemoteAddress", err) return } + if u.User != nil && form.MirrorPassword == "" && form.MirrorUsername == u.User.Username() { form.MirrorPassword, _ = u.User.Password() } @@ -482,17 +482,25 @@ func SettingsPost(ctx *context.Context) { return } - if err := mirror_service.UpdateAddress(ctx, pullMirror, address); err != nil { - ctx.ServerError("UpdateAddress", err) - return - } - remoteAddress, err := util.SanitizeURL(address) - if err != nil { + if err := pullMirror.UpdateRemoteAddress(ctx, address); err != nil { ctx.Data["Err_MirrorAddress"] = true handleSettingRemoteAddrError(ctx, err, form) return } - pullMirror.RemoteAddress = remoteAddress + + // Update the unencrypted address stored in the git config, so that future `git fetch` will access the right + // address. pullMirror.RemoteAddress is the sanitized no-creds version from UpdateRemoteAddress. + if maybeSanitizedURL, err := pullMirror.SanitizedRemoteAddress(); err != nil { + ctx.ServerError("SanitizedRemoteAddress", err) + return + } else if has, sanitizedURL := maybeSanitizedURL.Get(); !has { + // SanitizedRemoteAddress must be present after we just stored it + ctx.ServerError("SanitizedRemoteAddress", err) + return + } else if err := mirror_service.UpdateAddress(ctx, pullMirror, sanitizedURL); err != nil { + ctx.ServerError("UpdateAddress", err) + return + } form.LFS = form.LFS && setting.LFS.StartServer diff --git a/services/mirror/mirror_pull.go b/services/mirror/mirror_pull.go index d6e1ff6c51..c5adc6105f 100644 --- a/services/mirror/mirror_pull.go +++ b/services/mirror/mirror_pull.go @@ -31,55 +31,27 @@ const gitShortEmptySha = "0000000" // UpdateAddress writes new address to Git repository and database func UpdateAddress(ctx context.Context, m *repo_model.Mirror, addr string) error { - u, err := giturl.Parse(addr) - if err != nil { - return fmt.Errorf("invalid addr: %v", err) - } - remoteName := m.GetRemoteName() repoPath := m.GetRepository(ctx).RepoPath() - // Remove 
old remote - _, _, err = git.NewCommand(ctx, "remote", "rm").AddDynamicArguments(remoteName).RunStdString(&git.RunOpts{Dir: repoPath}) - if err != nil && !git.IsRemoteNotExistError(err) { - return err - } - - cmd := git.NewCommand(ctx, "remote", "add").AddDynamicArguments(remoteName).AddArguments("--mirror=fetch").AddDynamicArguments(addr) - if strings.Contains(addr, "://") && strings.Contains(addr, "@") { - cmd.SetDescription(fmt.Sprintf("remote add %s --mirror=fetch %s [repo_path: %s]", remoteName, util.SanitizeCredentialURLs(addr), repoPath)) - } else { - cmd.SetDescription(fmt.Sprintf("remote add %s --mirror=fetch %s [repo_path: %s]", remoteName, addr, repoPath)) - } - _, _, err = cmd.RunStdString(&git.RunOpts{Dir: repoPath}) - if err != nil && !git.IsRemoteNotExistError(err) { + _, _, err := git.NewCommand(ctx, "remote", "set-url"). + AddDynamicArguments(remoteName, addr). + RunStdString(&git.RunOpts{Dir: repoPath}) + if err != nil { return err } if m.Repo.HasWiki() { wikiPath := m.Repo.WikiPath() wikiRemotePath := repo_module.WikiRemoteURL(ctx, addr) - // Remove old remote of wiki - _, _, err = git.NewCommand(ctx, "remote", "rm").AddDynamicArguments(remoteName).RunStdString(&git.RunOpts{Dir: wikiPath}) - if err != nil && !git.IsRemoteNotExistError(err) { - return err - } - - cmd = git.NewCommand(ctx, "remote", "add").AddDynamicArguments(remoteName).AddArguments("--mirror=fetch").AddDynamicArguments(wikiRemotePath) - if strings.Contains(wikiRemotePath, "://") && strings.Contains(wikiRemotePath, "@") { - cmd.SetDescription(fmt.Sprintf("remote add %s --mirror=fetch %s [repo_path: %s]", remoteName, util.SanitizeCredentialURLs(wikiRemotePath), wikiPath)) - } else { - cmd.SetDescription(fmt.Sprintf("remote add %s --mirror=fetch %s [repo_path: %s]", remoteName, wikiRemotePath, wikiPath)) - } - _, _, err = cmd.RunStdString(&git.RunOpts{Dir: wikiPath}) - if err != nil && !git.IsRemoteNotExistError(err) { + _, _, err = git.NewCommand(ctx, "remote", "set-url"). 
+ AddDynamicArguments(remoteName, wikiRemotePath). + RunStdString(&git.RunOpts{Dir: wikiPath}) + if err != nil { return err } } - // erase authentication before storing in database - u.User = nil - m.Repo.OriginalURL = u.String() - return repo_model.UpdateRepositoryCols(ctx, m.Repo, "original_url") + return nil } // mirrorSyncResult contains information of a updated reference. @@ -262,6 +234,46 @@ func checkRecoverableSyncError(stderrMessage string) bool { } } +// Decrypt RemoteAddressAuth from the mirror. If absent on the mirror database table, fallback to the older method where +// credentials are stored in the git config file as the remote's address, encrypt those credentials and store them in +// the database, and wipe them from the git config file so that they're only stored in the one encrypted location in DB. +func DecryptOrRecoverRemoteAddress(ctx context.Context, m *repo_model.Mirror) (*giturl.GitURL, error) { + decryptedRemoteURL, err := m.DecryptRemoteAddress() + if err != nil { + return nil, fmt.Errorf("failed to decrypt remote address: %w", err) + } + + if has, url := decryptedRemoteURL.Get(); has { + remoteURL, err := giturl.Parse(url) + if err != nil { + return nil, fmt.Errorf("failed to parse decrypted remote address: %w", err) + } + return remoteURL, nil + } + + // fallback to reading remote URL from the git config file for repos that predate DecryptRemoteAddress + repoPath := m.GetRepository(ctx).RepoPath() + remoteURL, err := git.GetRemoteURL(ctx, repoPath, m.GetRemoteName()) + if err != nil { + return nil, fmt.Errorf("GetRemoteAddress error: %w", err) + } + + // Store the full address in the database + if err := m.UpdateRemoteAddress(ctx, remoteURL.URL.String()); err != nil { + return nil, fmt.Errorf("UpdateRemoteAddress error: %w", err) + } + + // Update the git config file to just contain the sanitized address + if maybeSanitizedURL, err := m.SanitizedRemoteAddress(); err != nil { + return nil, fmt.Errorf("SanitizedRemoteAddress error: %w", 
err) + } else if has, sanitizedURL := maybeSanitizedURL.Get(); !has { + return nil, fmt.Errorf("SanitizedRemoteAddress must be present after we just stored it, but had error: %w", err) + } else if err := UpdateAddress(ctx, m, sanitizedURL); err != nil { + return nil, fmt.Errorf("UpdateAddress error: %w", err) + } + return remoteURL, nil +} + // runSync returns true if sync finished without error. func runSync(ctx context.Context, m *repo_model.Mirror) ([]*mirrorSyncResult, bool) { repoPath := m.Repo.RepoPath() @@ -270,19 +282,29 @@ func runSync(ctx context.Context, m *repo_model.Mirror) ([]*mirrorSyncResult, bo log.Trace("SyncMirrors [repo: %-v]: running git remote update...", m.Repo) + remoteURL, err := DecryptOrRecoverRemoteAddress(ctx, m) + if err != nil { + log.Error("SyncMirrors [repo: %-v]: failed to get remote address: %v", m.Repo, err) + return nil, false + } + + cmd := git.NewCommand(ctx) + + // Setup credential helper to authenticate the fetch; needs to occur before the `fetch` arg. 
+ _, credCleanup, err := cmd.AddAuthCredentialHelperForRemote(remoteURL.URL.String()) + if err != nil { + log.Error("SyncMirrors [repo: %-v]: AddAuthCredentialHelperForRemote Error %v", m.Repo, err) + return nil, false + } + defer credCleanup() + // use fetch but not remote update because git fetch support --tags but remote update doesn't - cmd := git.NewCommand(ctx, "fetch") + cmd.AddArguments("fetch") if m.EnablePrune { cmd.AddArguments("--prune") } cmd.AddArguments("--tags").AddDynamicArguments(m.GetRemoteName()) - remoteURL, remoteErr := git.GetRemoteURL(ctx, repoPath, m.GetRemoteName()) - if remoteErr != nil { - log.Error("SyncMirrors [repo: %-v]: GetRemoteAddress Error %v", m.Repo, remoteErr) - return nil, false - } - envs := proxy.EnvWithProxy(remoteURL.URL) stdoutBuilder := strings.Builder{} diff --git a/services/repository/migrate.go b/services/repository/migrate.go index 80f5d68231..46b48d02d3 100644 --- a/services/repository/migrate.go +++ b/services/repository/migrate.go @@ -182,17 +182,12 @@ func MigrateRepositoryGitData(ctx context.Context, u *user_model.User, defer committer.Close() if opts.Mirror { - remoteAddress, err := util.SanitizeURL(opts.CloneAddr) - if err != nil { - return repo, err - } mirrorModel := repo_model.Mirror{ RepoID: repo.ID, Interval: setting.Mirror.DefaultInterval, EnablePrune: true, NextUpdateUnix: timeutil.TimeStampNow().AddDuration(setting.Mirror.DefaultInterval), LFS: opts.LFS, - RemoteAddress: remoteAddress, } if opts.LFS { mirrorModel.LFSEndpoint = opts.LFSEndpoint @@ -217,7 +212,7 @@ func MigrateRepositoryGitData(ctx context.Context, u *user_model.User, } } - if err = repo_model.InsertMirror(ctx, &mirrorModel); err != nil { + if err = mirrorModel.InsertWithAddress(ctx, opts.CloneAddr); err != nil { return repo, fmt.Errorf("InsertOne: %w", err) } diff --git a/templates/repo/header.tmpl b/templates/repo/header.tmpl index 513758a149..44254aae82 100644 --- a/templates/repo/header.tmpl +++ b/templates/repo/header.tmpl @@ -77,9 
+77,10 @@ {{end}}
{{if $.PullMirror}} + {{$address := MirrorRemoteAddress $.Context $.PullMirror}}
{{ctx.Locale.Tr "repo.mirror_from"}} - {{$.PullMirror.RemoteAddress}} + {{$address.Address}} {{if $.PullMirror.UpdatedUnix}}{{ctx.Locale.Tr "repo.mirror_sync"}} {{DateUtils.TimeSince $.PullMirror.UpdatedUnix}}{{end}}
{{end}} diff --git a/templates/repo/settings/options.tmpl b/templates/repo/settings/options.tmpl index fa25f4630a..137be0f334 100644 --- a/templates/repo/settings/options.tmpl +++ b/templates/repo/settings/options.tmpl @@ -148,9 +148,10 @@ {{else if $isWorkingPullMirror}} + {{$address := MirrorRemoteAddress $.Context .PullMirror}} - {{.PullMirror.RemoteAddress}} + {{$address.Address}} {{ctx.Locale.Tr "repo.settings.mirror_settings.direction.pull"}} {{DateUtils.FullTime .PullMirror.UpdatedUnix}} @@ -176,7 +177,6 @@
- {{$address := MirrorRemoteAddress $.Context .Repository .PullMirror.GetRemoteName}}
diff --git a/tests/integration/mirror_pull_test.go b/tests/integration/mirror_pull_test.go index 03d4bdcf92..a0de586aaa 100644 --- a/tests/integration/mirror_pull_test.go +++ b/tests/integration/mirror_pull_test.go @@ -5,9 +5,16 @@ package integration import ( + "fmt" "net/http" + "net/url" + "os" + "path" + "strings" "testing" + "time" + "forgejo.org/models/auth" "forgejo.org/models/db" repo_model "forgejo.org/models/repo" "forgejo.org/models/unittest" @@ -15,94 +22,546 @@ import ( "forgejo.org/modules/git" "forgejo.org/modules/gitrepo" "forgejo.org/modules/migration" + "forgejo.org/modules/optional" + "forgejo.org/modules/process" + "forgejo.org/modules/setting" + "forgejo.org/modules/structs" app_context "forgejo.org/services/context" + "forgejo.org/services/forms" + "forgejo.org/services/migrations" mirror_service "forgejo.org/services/mirror" release_service "forgejo.org/services/release" repo_service "forgejo.org/services/repository" + files_service "forgejo.org/services/repository/files" "forgejo.org/tests" + "github.com/PuerkitoBio/goquery" + "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestMirrorPull(t *testing.T) { - defer tests.PrepareTestEnv(t)() + t.Run("Basic", func(t *testing.T) { + defer tests.PrepareTestEnv(t)() - user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2}) - repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1}) - repoPath := repo_model.RepoPath(user.Name, repo.Name) + user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2}) + repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1}) + repoPath := repo_model.RepoPath(user.Name, repo.Name) - opts := migration.MigrateOptions{ - RepoName: "test_mirror", - Description: "Test mirror", - Private: false, - Mirror: true, - CloneAddr: repoPath, - Wiki: true, - Releases: false, - } + opts := migration.MigrateOptions{ + RepoName: "test_mirror", + Description: "Test mirror", 
+ Private: false, + Mirror: true, + CloneAddr: repoPath, + Wiki: true, + Releases: false, + } - mirrorRepo, err := repo_service.CreateRepositoryDirectly(db.DefaultContext, user, user, repo_service.CreateRepoOptions{ - Name: opts.RepoName, - Description: opts.Description, - IsPrivate: opts.Private, - IsMirror: opts.Mirror, - Status: repo_model.RepositoryBeingMigrated, + mirrorRepo, err := repo_service.CreateRepositoryDirectly(db.DefaultContext, user, user, repo_service.CreateRepoOptions{ + Name: opts.RepoName, + Description: opts.Description, + IsPrivate: opts.Private, + IsMirror: opts.Mirror, + Status: repo_model.RepositoryBeingMigrated, + }) + require.NoError(t, err) + assert.True(t, mirrorRepo.IsMirror, "expected pull-mirror repo to be marked as a mirror immediately after its creation") + + ctx := t.Context() + + mirror, err := repo_service.MigrateRepositoryGitData(ctx, user, mirrorRepo, opts, nil) + require.NoError(t, err) + + gitRepo, err := gitrepo.OpenRepository(git.DefaultContext, repo) + require.NoError(t, err) + defer gitRepo.Close() + + findOptions := repo_model.FindReleasesOptions{ + IncludeDrafts: true, + IncludeTags: true, + RepoID: mirror.ID, + } + initCount, err := db.Count[repo_model.Release](db.DefaultContext, findOptions) + require.NoError(t, err) + + require.NoError(t, release_service.CreateRelease(gitRepo, &repo_model.Release{ + RepoID: repo.ID, + Repo: repo, + PublisherID: user.ID, + Publisher: user, + TagName: "v0.2", + Target: "master", + Title: "v0.2 is released", + Note: "v0.2 is released", + IsDraft: false, + IsPrerelease: false, + IsTag: true, + }, "", []*release_service.AttachmentChange{})) + + _, err = repo_model.GetMirrorByRepoID(ctx, mirror.ID) + require.NoError(t, err) + + ok := mirror_service.SyncPullMirror(ctx, mirror.ID) + assert.True(t, ok) + + count, err := db.Count[repo_model.Release](db.DefaultContext, findOptions) + require.NoError(t, err) + assert.Equal(t, initCount+1, count) + + release, err := 
repo_model.GetRelease(db.DefaultContext, repo.ID, "v0.2") + require.NoError(t, err) + require.NoError(t, release_service.DeleteReleaseByID(ctx, repo, release, user, true)) + + ok = mirror_service.SyncPullMirror(ctx, mirror.ID) + assert.True(t, ok) + + count, err = db.Count[repo_model.Release](db.DefaultContext, findOptions) + require.NoError(t, err) + assert.Equal(t, initCount, count) }) - require.NoError(t, err) - assert.True(t, mirrorRepo.IsMirror, "expected pull-mirror repo to be marked as a mirror immediately after its creation") - ctx := t.Context() - - mirror, err := repo_service.MigrateRepositoryGitData(ctx, user, mirrorRepo, opts, nil) - require.NoError(t, err) - - gitRepo, err := gitrepo.OpenRepository(git.DefaultContext, repo) - require.NoError(t, err) - defer gitRepo.Close() - - findOptions := repo_model.FindReleasesOptions{ - IncludeDrafts: true, - IncludeTags: true, - RepoID: mirror.ID, + // How will we interact with the pull mirror during this test? + interactionMethod := []struct { + name string + useAPI bool + createPullMirror func(t *testing.T, sourceRepo *repo_model.Repository, authenticate bool) (repoName string) + verifyMirrorDetails func(t *testing.T, sourceRepo *repo_model.Repository, mirrorName string, authenticate bool) + triggerPullMirror func(t *testing.T, mirrorName string) + changePullMirrorCredentials func(t *testing.T, sourceRepo *repo_model.Repository, mirrorName string, authenticate bool) + changePullMirrorAddress func(t *testing.T, sourceRepo *repo_model.Repository, mirrorName string, authenticate bool) + }{ + { + name: "API", + useAPI: true, + createPullMirror: createPullMirrorViaAPI, + triggerPullMirror: triggerPullMirrorViaAPI, + verifyMirrorDetails: func(t *testing.T, sourceRepo *repo_model.Repository, mirrorName string, authenticate bool) { + // API provides no visibility into a repo's mirror settings right now + }, + }, + { + name: "Web", + useAPI: false, + createPullMirror: createPullMirrorViaWeb, + triggerPullMirror: 
triggerPullMirrorViaWeb, + verifyMirrorDetails: verifyPullMirrorViaWeb, + changePullMirrorCredentials: changePullMirrorCredentialsViaWeb, + changePullMirrorAddress: changePullMirrorCredentialsViaWeb, // one endpoint, so same as creds + }, } - initCount, err := db.Count[repo_model.Release](db.DefaultContext, findOptions) + + mirrorConfiguration := []struct { + name string + privateSource bool + }{ + { + name: "HTTP Without Auth", + }, + { + name: "HTTP With Auth", + privateSource: true, + }, + } + + // Not using MockVariableValue due to need to undo `migrations.Init()` + prev := setting.Migrations.AllowedDomains + setting.Migrations.AllowedDomains = "localhost" + migrations.Init() // reinitialize for changed allowList + defer func() { + setting.Migrations.AllowedDomains = prev + migrations.Init() // reinitialize for changed allowList + }() + + onApplicationRun(t, func(t *testing.T, u *url.URL) { + for _, im := range interactionMethod { + for _, mc := range mirrorConfiguration { + t.Run(fmt.Sprintf("%s/%s", im.name, mc.name), func(t *testing.T) { + defer tests.PrintCurrentTest(t)() + + user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2}) + + // Create the source repository that will be mirrored. 
+ sourceRepo, sourceRepoSha, cleanupSource := tests.CreateDeclarativeRepoWithOptions(t, user2, + tests.DeclarativeRepoOptions{ + IsPrivate: optional.Some(mc.privateSource), + Files: optional.Some([]*files_service.ChangeRepoFile{ + { + Operation: "create", + TreePath: "docs.md", + ContentReader: strings.NewReader("hello, world"), + }, + }), + }, + ) + defer cleanupSource() + require.NotEmpty(t, sourceRepoSha) + + // Create pull mirror + mirror := im.createPullMirror(t, sourceRepo, mc.privateSource) + verifyPullMirrorContents(t, mirror, sourceRepoSha) + verifyPullMirrorConfig(t, mirror, sourceRepo, mc.privateSource) + im.verifyMirrorDetails(t, sourceRepo, mirror, mc.privateSource) + + // Push a change to the source and refresh the mirror + sourceRepoSha = changePullMirrorSource(t, sourceRepo, sourceRepoSha) + im.triggerPullMirror(t, mirror) + waitForPullMirror(t, mirror, sourceRepoSha) + + // Test changing the mirror's authentication method (if available) + if mc.privateSource && im.changePullMirrorCredentials != nil { + sourceRepoSha = changePullMirrorSource(t, sourceRepo, sourceRepoSha) + im.changePullMirrorCredentials(t, sourceRepo, mirror, mc.privateSource) + verifyPullMirrorConfig(t, mirror, sourceRepo, mc.privateSource) + im.verifyMirrorDetails(t, sourceRepo, mirror, mc.privateSource) + im.triggerPullMirror(t, mirror) + waitForPullMirror(t, mirror, sourceRepoSha) + } + + // Test changing the mirror's address (if available) + if im.changePullMirrorAddress != nil { + sourceRepo = renamePullMirrorSourceRepo(t, sourceRepo) + sourceRepoSha = changePullMirrorSource(t, sourceRepo, sourceRepoSha) + im.changePullMirrorAddress(t, sourceRepo, mirror, mc.privateSource) + verifyPullMirrorConfig(t, mirror, sourceRepo, mc.privateSource) + im.verifyMirrorDetails(t, sourceRepo, mirror, mc.privateSource) + im.triggerPullMirror(t, mirror) + waitForPullMirror(t, mirror, sourceRepoSha) + } + }) + } + } + }) + + t.Run("migrate from repo config credentials", func(t *testing.T) { + 
defer tests.PrintCurrentTest(t)() + + user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2}) + + mirrorRepo, _, cleanupMirror := tests.CreateDeclarativeRepoWithOptions(t, user2, + tests.DeclarativeRepoOptions{}, + ) + defer cleanupMirror() + + // Write to the repo a config file that would have plausibly existed before EncryptedRemoteAddress was + // introduced: + repoPath := mirrorRepo.RepoPath() + err := os.WriteFile(path.Join(repoPath, "config"), []byte(` +[core] + repositoryformatversion = 0 + filemode = true + bare = true +[remote "origin"] + url = https://user:password@example.com/org/repo.git + tagOpt = --no-tags + fetch = +refs/*:refs/* + mirror = true + fetch = +refs/tags/*:refs/tags/* +`), 0o644) + require.NoError(t, err) + + // Create a Mirror record without an EncryptedRemoteAddress: + mirror := &repo_model.Mirror{ + RepoID: mirrorRepo.ID, + Interval: 8 * time.Hour, + EnablePrune: true, + } + _, err = db.GetEngine(t.Context()).Insert(mirror) + require.NoError(t, err) + require.Nil(t, mirror.EncryptedRemoteAddress) + + remoteURL, err := mirror_service.DecryptOrRecoverRemoteAddress(t.Context(), mirror) + require.NoError(t, err) + assert.Equal(t, "https://user:password@example.com/org/repo.git", remoteURL.URL.String()) + + // EncryptedRemoteAddress should now be populated from the recovery: + assert.NotNil(t, mirror.EncryptedRemoteAddress) + maybeDecryptedURL, err := mirror.DecryptRemoteAddress() + require.NoError(t, err) + has, decryptedURL := maybeDecryptedURL.Get() + require.True(t, has) + assert.Equal(t, "https://user:password@example.com/org/repo.git", decryptedURL) + + // SanitizedRemoteAddress can be fetched: + maybeSanitizedURL, err := mirror.SanitizedRemoteAddress() + require.NoError(t, err) + has, sanitizedURL := maybeSanitizedURL.Get() + require.True(t, has) + assert.Equal(t, "https://user@example.com/org/repo.git", sanitizedURL) + + // Database record is updated in the database: + refetchMirror := 
unittest.AssertExistsAndLoadBean(t, &repo_model.Mirror{RepoID: mirrorRepo.ID}) + assert.Equal(t, mirror.EncryptedRemoteAddress, refetchMirror.EncryptedRemoteAddress) + + // Config file is rewritten: + config, err := os.ReadFile(path.Join(repoPath, "config")) + require.NoError(t, err) + assert.Equal(t, ` +[core] + repositoryformatversion = 0 + filemode = true + bare = true +[remote "origin"] + url = https://user@example.com/org/repo.git + tagOpt = --no-tags + fetch = +refs/*:refs/* + mirror = true + fetch = +refs/tags/*:refs/tags/* +`, string(config)) + }) +} + +func createPullMirrorViaWeb(t *testing.T, sourceRepo *repo_model.Repository, authenticate bool) string { + session := loginUser(t, "user2") + + mirrorName := fmt.Sprintf("pullmirror-%s", sourceRepo.Name) + form := &forms.MigrateRepoForm{ + CloneAddr: sourceRepo.CloneLink().HTTPS, + Service: structs.PlainGitService, + UID: 2, + RepoName: mirrorName, + Mirror: true, + } + if authenticate { + form.AuthUsername = "user2" + form.AuthPassword = getTokenForLoggedInUser(t, session, auth.AccessTokenScopeReadRepository) + } + + resp := session.MakeRequest(t, + NewRequestWithJSON(t, "POST", "/repo/migrate", form), + http.StatusSeeOther) + location := resp.Header().Get("Location") + assert.Equal(t, fmt.Sprintf("/user2/pullmirror-%s", sourceRepo.Name), location) + + var lastBody string + if !assert.Eventuallyf(t, + func() bool { + resp := session.MakeRequest(t, + NewRequest(t, "GET", location), + http.StatusOK) + body := resp.Body.String() + lastBody = body + // Looking for the repo page to be fully populated indicating that the migration is complete: + // Check that the first commit message is present: + if !strings.Contains(body, "Initial commit") { + return false + } + // Check that the fork button is present: + if !strings.Contains(body, fmt.Sprintf("/user2/%s/fork", mirrorName)) { + return false + } + return true + }, + 15*time.Second, 1*time.Second, + "expected migration to complete and repo page to render") { + 
t.Logf("last received page body: %s", lastBody) + } + + return mirrorName +} + +func createPullMirrorViaAPI(t *testing.T, sourceRepo *repo_model.Repository, authenticate bool) string { + session := loginUser(t, "user2") + apiToken := getTokenForLoggedInUser(t, session, auth.AccessTokenScopeWriteRepository) + + mirrorName := fmt.Sprintf("pullmirror-%s", sourceRepo.Name) + form := &structs.MigrateRepoOptions{ + CloneAddr: sourceRepo.CloneLink().HTTPS, + Service: "git", + RepoOwner: "user2", + RepoName: mirrorName, + Mirror: true, + } + if authenticate { + form.AuthUsername = "user2" + form.AuthPassword = getTokenForLoggedInUser(t, session, auth.AccessTokenScopeReadRepository) + } + + resp := session.MakeRequest(t, + NewRequestWithJSON(t, "POST", "/api/v1/repos/migrate", form).AddTokenAuth(apiToken), + http.StatusCreated) + var repo structs.Repository + DecodeJSON(t, resp, &repo) + assert.NotNil(t, repo) + assert.True(t, repo.Mirror) + assert.False(t, repo.Empty) + + return mirrorName +} + +func verifyPullMirrorViaWeb(t *testing.T, sourceRepo *repo_model.Repository, mirrorName string, authenticate bool) { + session := loginUser(t, "user2") + resp := session.MakeRequest(t, + NewRequestf(t, "GET", "/user2/%s/settings", mirrorName), + http.StatusOK) + htmlDoc := NewHTMLParser(t, resp.Body) + htmlDoc.AssertAttrEqual(t, "#mirror_address", "value", sourceRepo.CloneLink().HTTPS) + if authenticate { + htmlDoc.AssertAttrEqual(t, "#mirror_username", "value", "user2") + htmlDoc.AssertAttrEqual(t, "#mirror_password", "value", "") + htmlDoc.AssertAttrEqual(t, "#mirror_password", "placeholder", "(Unchanged)") + } else { + htmlDoc.AssertAttrEqual(t, "#mirror_username", "value", "") + htmlDoc.AssertAttrEqual(t, "#mirror_password", "value", "") + htmlDoc.AssertAttrEqual(t, "#mirror_password", "placeholder", "(Unset)") + } + + resp = session.MakeRequest(t, + NewRequestf(t, "GET", "/user2/%s", mirrorName), + http.StatusOK) + htmlDoc = NewHTMLParser(t, resp.Body) + 
htmlDoc.AssertElementPredicate(t, ".fork-flag", func(selection *goquery.Selection) bool { + text := strings.TrimSpace(selection.Text()) + assert.Contains(t, text, "mirror of") + assert.Contains(t, text, sourceRepo.CloneLink().HTTPS) + return true + }) +} + +func triggerPullMirrorViaWeb(t *testing.T, mirrorName string) { + session := loginUser(t, "user2") + + resp := session.MakeRequest(t, + NewRequestWithValues(t, "POST", fmt.Sprintf("/user2/%s/settings", mirrorName), map[string]string{"action": "mirror-sync"}), + http.StatusSeeOther) + location := resp.Header().Get("Location") + assert.Equal(t, fmt.Sprintf("/user2/%s/settings", mirrorName), location) +} + +func triggerPullMirrorViaAPI(t *testing.T, mirrorName string) { + session := loginUser(t, "user2") + apiToken := getTokenForLoggedInUser(t, session, auth.AccessTokenScopeWriteRepository) + + // Trigger sync... + session.MakeRequest(t, + NewRequestf(t, "POST", "/api/v1/repos/user2/%s/mirror-sync", mirrorName).AddTokenAuth(apiToken), + http.StatusOK) +} + +func changePullMirrorCredentialsViaWeb(t *testing.T, sourceRepo *repo_model.Repository, mirrorName string, authenticate bool) { + session := loginUser(t, "user2") + + form := map[string]string{ + "action": "mirror", + "enable_prune": "on", + "interval": "8h0m0s", + "mirror_address": sourceRepo.CloneLink().HTTPS, + } + if authenticate { + form["mirror_username"] = "user2" + form["mirror_password"] = getTokenForLoggedInUser(t, session, auth.AccessTokenScopeReadRepository) + } + + resp := session.MakeRequest(t, + NewRequestWithValues(t, "POST", fmt.Sprintf("/user2/%s/settings", mirrorName), form), + http.StatusSeeOther) + location := resp.Header().Get("Location") + assert.Equal(t, fmt.Sprintf("/user2/%s/settings", mirrorName), location) +} + +func verifyPullMirrorContents(t *testing.T, mirrorName, expectedSha string) { + session := loginUser(t, "user2") + apiToken := getTokenForLoggedInUser(t, session, auth.AccessTokenScopeReadRepository) + resp := 
session.MakeRequest(t, + NewRequest(t, "GET", fmt.Sprintf("/api/v1/repos/user2/%s/commits?sha=main&limit=1", mirrorName)).AddTokenAuth(apiToken), + http.StatusOK) + var commits []*structs.Commit + DecodeJSON(t, resp, &commits) + require.Len(t, commits, 1) + assert.Equal(t, expectedSha, commits[0].SHA) +} + +func waitForPullMirror(t *testing.T, mirrorName, expectedSha string) { + session := loginUser(t, "user2") + apiToken := getTokenForLoggedInUser(t, session, auth.AccessTokenScopeReadRepository) + + var commits []*structs.Commit + if !assert.Eventually(t, + func() bool { + resp := session.MakeRequest(t, + NewRequest(t, "GET", fmt.Sprintf("/api/v1/repos/user2/%s/commits?sha=main&limit=1", mirrorName)).AddTokenAuth(apiToken), + http.StatusOK) + DecodeJSON(t, resp, &commits) + require.Len(t, commits, 1) + return commits[0].SHA == expectedSha + }, + 15*time.Second, 1*time.Second) { + t.Logf("sync was supposed to bring repo to commit %s, but observed commits = %#v", expectedSha, commits) + } +} + +func getGitConfig(t *testing.T, configFile, configPath string) string { + stdout, stderr, err := process.GetManager().Exec("getGitConfig", "git", "config", "get", "--file", configFile, configPath) + require.NoError(t, err, "fetch config %s failed: git stderr: %s", configPath, stderr) + return strings.TrimSpace(stdout) +} + +func verifyPullMirrorConfig(t *testing.T, mirrorName string, sourceRepo *repo_model.Repository, authenticate bool) { + mirrorRepo, err := repo_model.GetRepositoryByOwnerAndName(t.Context(), "user2", mirrorName) require.NoError(t, err) - require.NoError(t, release_service.CreateRelease(gitRepo, &repo_model.Release{ - RepoID: repo.ID, - Repo: repo, - PublisherID: user.ID, - Publisher: user, - TagName: "v0.2", - Target: "master", - Title: "v0.2 is released", - Note: "v0.2 is released", - IsDraft: false, - IsPrerelease: false, - IsTag: true, - }, "", []*release_service.AttachmentChange{})) + repoPath := mirrorRepo.RepoPath() + configPath := path.Join(repoPath, 
"config") - _, err = repo_model.GetMirrorByRepoID(ctx, mirror.ID) + expectedURL := sourceRepo.CloneLink().HTTPS + if authenticate { + expectedURL = strings.Replace(expectedURL, "http://", "http://user2@", 1) + } + assert.Equal(t, expectedURL, getGitConfig(t, configPath, "remote.origin.url")) + assert.Equal(t, "true", getGitConfig(t, configPath, "remote.origin.mirror")) + assert.Equal(t, "+refs/tags/*:refs/tags/*", getGitConfig(t, configPath, "remote.origin.fetch")) +} + +func changePullMirrorSource(t *testing.T, sourceRepo *repo_model.Repository, sourceRepoSha string) string { + user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2}) + resp, err := files_service.ChangeRepoFiles(git.DefaultContext, sourceRepo, user2, + &files_service.ChangeRepoFilesOptions{ + Files: []*files_service.ChangeRepoFile{ + { + Operation: "update", + TreePath: "docs.md", + ContentReader: strings.NewReader(uuid.NewString()), + }, + }, + Message: "add files", + OldBranch: "main", + NewBranch: "main", + Author: &files_service.IdentityOptions{ + Name: user2.Name, + Email: user2.Email, + }, + Committer: &files_service.IdentityOptions{ + Name: user2.Name, + Email: user2.Email, + }, + Dates: &files_service.CommitDateOptions{ + Author: time.Now(), + Committer: time.Now(), + }, + LastCommitID: sourceRepoSha, + }) require.NoError(t, err) + assert.NotEmpty(t, resp) + return resp.Commit.SHA +} - ok := mirror_service.SyncPullMirror(ctx, mirror.ID) - assert.True(t, ok) +func renamePullMirrorSourceRepo(t *testing.T, sourceRepo *repo_model.Repository) *repo_model.Repository { + session := loginUser(t, "user2") + apiToken := getTokenForLoggedInUser(t, session, auth.AccessTokenScopeWriteRepository) - count, err := db.Count[repo_model.Release](db.DefaultContext, findOptions) - require.NoError(t, err) - assert.Equal(t, initCount+1, count) + newName := uuid.NewString() + session.MakeRequest(t, + NewRequestWithJSON(t, "PATCH", fmt.Sprintf("/api/v1/repos/user2/%s", sourceRepo.Name), + 
&structs.EditRepoOption{ + Name: &newName, + }).AddTokenAuth(apiToken), + http.StatusOK) - release, err := repo_model.GetRelease(db.DefaultContext, repo.ID, "v0.2") - require.NoError(t, err) - require.NoError(t, release_service.DeleteReleaseByID(ctx, repo, release, user, true)) - - ok = mirror_service.SyncPullMirror(ctx, mirror.ID) - assert.True(t, ok) - - count, err = db.Count[repo_model.Release](db.DefaultContext, findOptions) - require.NoError(t, err) - assert.Equal(t, initCount, count) + newRepo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: sourceRepo.ID}) + assert.Equal(t, newRepo.Name, newName) + assert.NotEqual(t, newRepo.CloneLink().HTTPS, sourceRepo.CloneLink().HTTPS) // should have changed to new name + return newRepo } func TestPullMirrorRedactCredentials(t *testing.T) { From 2d2029c5982e898b39e826a5eee76d6107d12b50 Mon Sep 17 00:00:00 2001 From: limiting-factor Date: Sat, 4 Apr 2026 16:29:14 +0200 Subject: [PATCH 082/352] tests: make buffer log writer thread safe (#11962) When two goroutines attempt to access the content of the buffer log writer, they must be made thread safe with a write mutex. The buffer log writer is only used in testing. ## Checklist ### Tests for Go changes - I added test coverage for Go changes... - [x] in their respective `*_test.go` for unit tests. - [ ] in the `tests/integration` directory if it involves interactions with a live Forgejo server. - I ran... - [ ] `make pr-go` before pushing ### Documentation - [ ] I created a pull request [to the documentation](https://codeberg.org/forgejo/docs) to explain to Forgejo users how to use this change. - [x] I did not document these changes and I do not expect someone else to do it. ### Release notes - [ ] This change will be noticed by a Forgejo user or admin (feature, bug fix, performance, etc.). I suggest to include a release note for this change. - [x] This change is not visible to a Forgejo user or admin (refactor, dependency upgrade, etc.). 
I think there is no need to add a release note for this change. *The decision if the pull request will be shown in the release notes is up to the mergers / release team.* The content of the `release-notes/.md` file will serve as the basis for the release notes. If the file does not exist, the title of the pull request will be used instead. Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/11962 Reviewed-by: Mathieu Fenniak Co-authored-by: limiting-factor Co-committed-by: limiting-factor --- .deadcode-out | 3 ++ modules/log/event_writer_buffer.go | 42 ++++++++++++++++++++----- modules/log/event_writer_buffer_test.go | 12 +++---- modules/log/logger_impl_test.go | 2 +- 4 files changed, 44 insertions(+), 15 deletions(-) diff --git a/.deadcode-out b/.deadcode-out index 45ad117ccd..0c44d2bdfd 100644 --- a/.deadcode-out +++ b/.deadcode-out @@ -134,6 +134,9 @@ forgejo.org/modules/json StdJSON.Indent forgejo.org/modules/log + eventWriterBuffer.Close + eventWriterBuffer.Write + eventWriterBuffer.GetString NewEventWriterBuffer forgejo.org/modules/markup diff --git a/modules/log/event_writer_buffer.go b/modules/log/event_writer_buffer.go index 28857c2189..a7557618a8 100644 --- a/modules/log/event_writer_buffer.go +++ b/modules/log/event_writer_buffer.go @@ -5,18 +5,44 @@ package log import ( "bytes" + "sync" ) -type EventWriterBuffer struct { - *EventWriterBaseImpl - Buffer *bytes.Buffer +type EventWriterBuffer interface { + EventWriter + GetString() string } -var _ EventWriter = (*EventWriterBuffer)(nil) +type eventWriterBuffer struct { + *EventWriterBaseImpl + buffer *bytes.Buffer + mu sync.RWMutex +} -func NewEventWriterBuffer(name string, mode WriterMode) *EventWriterBuffer { - w := &EventWriterBuffer{EventWriterBaseImpl: NewEventWriterBase(name, "buffer", mode)} - w.Buffer = new(bytes.Buffer) - w.OutputWriteCloser = nopCloser{w.Buffer} +var _ EventWriterBuffer = (*eventWriterBuffer)(nil) + +func (*eventWriterBuffer) Close() error { + return nil +} + +func (o 
*eventWriterBuffer) Write(p []byte) (n int, err error) { + o.mu.Lock() + defer o.mu.Unlock() + return o.buffer.Write(p) +} + +func (o *eventWriterBuffer) GetString() string { + o.mu.Lock() + defer o.mu.Unlock() + b := o.buffer.Bytes() + s := make([]byte, len(b)) + copy(s, b) + return string(s) +} + +func NewEventWriterBuffer(name string, mode WriterMode) EventWriter { + w := &eventWriterBuffer{EventWriterBaseImpl: NewEventWriterBase(name, "buffer", mode)} + w.buffer = new(bytes.Buffer) + w.OutputWriteCloser = w return w } diff --git a/modules/log/event_writer_buffer_test.go b/modules/log/event_writer_buffer_test.go index d1e37c3673..ac0759ad0b 100644 --- a/modules/log/event_writer_buffer_test.go +++ b/modules/log/event_writer_buffer_test.go @@ -29,7 +29,7 @@ func TestBufferLogger(t *testing.T) { MsgSimpleText: expected, }) logger.Close() - assert.Contains(t, bufferWriter.Buffer.String(), expected) + assert.Contains(t, bufferWriter.(log.EventWriterBuffer).GetString(), expected) } func TestBufferLoggerWithExclusion(t *testing.T) { @@ -41,7 +41,7 @@ func TestBufferLoggerWithExclusion(t *testing.T) { Level: level, Prefix: prefix, Exclusion: message, - }) + }).(log.EventWriterBuffer) logger := log.NewLoggerWithWriters(t.Context(), "test", bufferWriter) @@ -50,7 +50,7 @@ func TestBufferLoggerWithExclusion(t *testing.T) { MsgSimpleText: message, }) logger.Close() - assert.NotContains(t, bufferWriter.Buffer.String(), message) + assert.NotContains(t, bufferWriter.GetString(), message) } func TestBufferLoggerWithExpressionAndExclusion(t *testing.T) { @@ -64,7 +64,7 @@ func TestBufferLoggerWithExpressionAndExclusion(t *testing.T) { Prefix: prefix, Expression: expression, Exclusion: exclusion, - }) + }).(log.EventWriterBuffer) logger := log.NewLoggerWithWriters(t.Context(), "test", bufferWriter) @@ -74,6 +74,6 @@ func TestBufferLoggerWithExpressionAndExclusion(t *testing.T) { logger.SendLogEvent(&log.Event{Level: log.INFO, MsgSimpleText: "none"}) logger.Close() - 
assert.Contains(t, bufferWriter.Buffer.String(), "foo expression") - assert.NotContains(t, bufferWriter.Buffer.String(), "bar") + assert.Contains(t, bufferWriter.GetString(), "foo expression") + assert.NotContains(t, bufferWriter.GetString(), "bar") } diff --git a/modules/log/logger_impl_test.go b/modules/log/logger_impl_test.go index 59276a83f4..1d77ae0c25 100644 --- a/modules/log/logger_impl_test.go +++ b/modules/log/logger_impl_test.go @@ -23,5 +23,5 @@ func TestLog(t *testing.T) { testGeneric(logger, "I'm the generic value!") logger.Close() - assert.Contains(t, bufferWriter.Buffer.String(), ".../logger_impl_test.go:13:testGeneric() [I] Just testing the logging of a generic function I'm the generic value!") + assert.Contains(t, bufferWriter.(EventWriterBuffer).GetString(), ".../logger_impl_test.go:13:testGeneric() [I] Just testing the logging of a generic function I'm the generic value!") } From df86b495dc76204c1a8c188ef1a8833c7a8cc8b4 Mon Sep 17 00:00:00 2001 From: Andreas Ahlenstorf Date: Sat, 4 Apr 2026 18:23:06 +0200 Subject: [PATCH 083/352] feat: support `timezone` in scheduled workflows (#11851) GitHub recently added the ability to [specify a time zone for scheduled workflows](https://docs.github.com/en/actions/reference/workflows-and-actions/workflow-syntax#onschedule), thereby making it possible to run scheduled workflows at a certain local time, no matter whether daylight saving time (DST) is currently active or not. Example copied from GitHub's documentation: ```yaml on: schedule: - cron: '30 5 * * 1-5' timezone: "America/New_York" ``` The workflow would run at 05:30 each morning in the America/New_York timezone every Monday through Friday. `timezone` accepts IANA time zone names. If `timezone` is absent, `Etc/UTC` is used. GitHub runs workflows that were scheduled during DST jumps forward, for example, between 2 o'clock and 3 o'clock, directly after the clock jumped forward. In this case, that would be 3 o'clock. 
Forgejo already supports time zones by prepending cron schedules with `TZ=` or `CRON_TZ=`: ```yaml on: schedule: - cron: 'CRON_TZ=America/New_York 30 5 * * 1-5' ``` However, that capability is not documented. Workflows that are scheduled to run during DST changes are skipped when the clock jumps forward and run twice when it jumps backward. This two-part PR adds support for `timezone` to improve compatibility with GitHub. `TZ` and `CRON_TZ` continue working. When both `timezone` and `TZ` or `CRON_TZ` are present, `timezone` takes precedence. When neither `timezone` nor `TZ` nor `CRON_TZ` are present, `Etc/UTC` is used as before. Because `TZ` and `CRON_TZ` were already supported by Forgejo before GitHub introduced `timezone`, `timezone` behaves during DST changes as previous versions of Forgejo, thereby deviating from GitHub. That means that workflows that are scheduled to run during DST changes are skipped when the clock jumps forward. And they run twice when it jumps backwards. However, it is generally recommended not to schedule workflows during the time of day when DST changes occur. This part of the PR integrates the [workflow validation and parsing of the `timezone` field](https://code.forgejo.org/forgejo/runner/pulls/1454) supplied by Forgejo Runner. ## Checklist The [contributor guide](https://forgejo.org/docs/next/contributor/) contains information that will be helpful to first time contributors. All work and communication must conform to Forgejo's [AI Agreement](https://codeberg.org/forgejo/governance/src/branch/main/AIAgreement.md). There also are a few [conditions for merging Pull Requests in Forgejo repositories](https://codeberg.org/forgejo/governance/src/branch/main/PullRequestsAgreement.md). You are also welcome to join the [Forgejo development chatroom](https://matrix.to/#/#forgejo-development:matrix.org). ### Tests for Go changes (can be removed for JavaScript changes) - I added test coverage for Go changes... 
- [x] in their respective `*_test.go` for unit tests. - [x] in the `tests/integration` directory if it involves interactions with a live Forgejo server. - I ran... - [x] `make pr-go` before pushing ### Tests for JavaScript changes (can be removed for Go changes) - I added test coverage for JavaScript changes... - [ ] in `web_src/js/*.test.js` if it can be unit tested. - [ ] in `tests/e2e/*.test.e2e.js` if it requires interactions with a live Forgejo server (see also the [developer guide for JavaScript testing](https://codeberg.org/forgejo/forgejo/src/branch/forgejo/tests/e2e/README.md#end-to-end-tests)). ### Documentation - [x] I created a pull request [to the documentation](https://codeberg.org/forgejo/docs) to explain to Forgejo users how to use this change. - https://codeberg.org/forgejo/docs/pulls/1853 - [ ] I did not document these changes and I do not expect someone else to do it. ### Release notes - [x] This change will be noticed by a Forgejo user or admin (feature, bug fix, performance, etc.). I suggest to include a release note for this change. - [ ] This change is not visible to a Forgejo user or admin (refactor, dependency upgrade, etc.). I think there is no need to add a release note for this change. *The decision if the pull request will be shown in the release notes is up to the mergers / release team.* The content of the `release-notes/.md` file will serve as the basis for the release notes. If the file does not exist, the title of the pull request will be used instead. 
## Release notes - Features - [PR](https://codeberg.org/forgejo/forgejo/pulls/11851): support `timezone` in scheduled workflows Co-authored-by: Renovate Bot Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/11851 Reviewed-by: Mathieu Fenniak Co-authored-by: Andreas Ahlenstorf Co-committed-by: Andreas Ahlenstorf --- go.mod | 2 +- go.sum | 4 +- models/actions/schedule.go | 22 +--- models/actions/schedule_spec.go | 44 ++++++-- models/actions/schedule_spec_test.go | 79 ++++++++++++-- models/actions/schedule_test.go | 102 ++++++++++++++++++ .../v15c_add_schedule_spec_time_zones.go | 31 ++++++ .../action_schedule.yml | 4 - services/actions/notifier_helper.go | 14 ++- services/actions/schedule_tasks_test.go | 30 ++++-- tests/integration/actions_trigger_test.go | 49 ++++++++- 11 files changed, 323 insertions(+), 58 deletions(-) create mode 100644 models/actions/schedule_test.go create mode 100644 models/forgejo_migrations/v15c_add_schedule_spec_time_zones.go diff --git a/go.mod b/go.mod index c02310548b..37039873c8 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( code.forgejo.org/forgejo/go-rpmutils v1.0.0 code.forgejo.org/forgejo/levelqueue v1.0.0 code.forgejo.org/forgejo/reply v1.0.2 - code.forgejo.org/forgejo/runner/v12 v12.7.3 + code.forgejo.org/forgejo/runner/v12 v12.8.0 code.forgejo.org/go-chi/binding v1.0.1 code.forgejo.org/go-chi/cache v1.0.1 code.forgejo.org/go-chi/captcha v1.0.2 diff --git a/go.sum b/go.sum index 8c06077317..cd477466b7 100644 --- a/go.sum +++ b/go.sum @@ -30,8 +30,8 @@ code.forgejo.org/forgejo/levelqueue v1.0.0 h1:9krYpU6BM+j/1Ntj6m+VCAIu0UNnne1/Uf code.forgejo.org/forgejo/levelqueue v1.0.0/go.mod h1:fmG6zhVuqim2rxSFOoasgXO8V2W/k9U31VVYqLIRLhQ= code.forgejo.org/forgejo/reply v1.0.2 h1:dMhQCHV6/O3L5CLWNTol+dNzDAuyCK88z4J/lCdgFuQ= code.forgejo.org/forgejo/reply v1.0.2/go.mod h1:RyZUfzQLc+fuLIGjTSQWDAJWPiL4WtKXB/FifT5fM7U= -code.forgejo.org/forgejo/runner/v12 v12.7.3 h1:+thSawVfLeAZaWB6sYeUPvLj4lxYjCIDt/ktvkfX5Rs= 
-code.forgejo.org/forgejo/runner/v12 v12.7.3/go.mod h1:OO+Vy9Dww6WNV7GG/6VUWo/0WwXY+ASGlINmAfEA9Ws= +code.forgejo.org/forgejo/runner/v12 v12.8.0 h1:/MqOseYbsGaQ2qzepaZr3VyuqpESvSP/ZnC2aKfmU3g= +code.forgejo.org/forgejo/runner/v12 v12.8.0/go.mod h1:sgDAYfO4NJI1kUzGuD7klHuoFLQzWmZPw0erg7QlbJU= code.forgejo.org/forgejo/ssh v0.0.0-20241211213324-5fc306ca0616 h1:kEZL84+02jY9RxXM4zHBWZ3Fml0B09cmP1LGkDsCfIA= code.forgejo.org/forgejo/ssh v0.0.0-20241211213324-5fc306ca0616/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= code.forgejo.org/go-chi/binding v1.0.1 h1:coKNI+X1NzRN7X85LlrpvBRqk0TXpJ+ja28vusQWEuY= diff --git a/models/actions/schedule.go b/models/actions/schedule.go index 05c9f15d38..8c410b9d38 100644 --- a/models/actions/schedule.go +++ b/models/actions/schedule.go @@ -5,7 +5,6 @@ package actions import ( "context" - "time" "forgejo.org/models/db" repo_model "forgejo.org/models/repo" @@ -21,7 +20,7 @@ import ( type ActionSchedule struct { ID int64 Title string - Specs []string + Specs []*ActionScheduleSpec `xorm:"-"` RepoID int64 `xorm:"index"` Repo *repo_model.Repository `xorm:"-"` OwnerID int64 `xorm:"index"` @@ -73,25 +72,12 @@ func CreateScheduleTask(ctx context.Context, rows []*ActionSchedule) error { return err } - // Loop through each schedule spec and create a new spec row - now := time.Now() - for _, spec := range row.Specs { - specRow := &ActionScheduleSpec{ - RepoID: row.RepoID, - ScheduleID: row.ID, - Spec: spec, - } - // Parse the spec and check for errors - schedule, err := specRow.Parse() - if err != nil { - continue // skip to the next spec if there's an error - } - - specRow.Next = timeutil.TimeStamp(schedule.Next(now).Unix()) + spec.ScheduleID = row.ID + spec.RepoID = row.RepoID // Insert the new schedule spec row - if err = db.Insert(ctx, specRow); err != nil { + if err = db.Insert(ctx, spec); err != nil { return err } } diff --git a/models/actions/schedule_spec.go b/models/actions/schedule_spec.go index 83bdceb850..bcaee8bd6f 100644 --- 
a/models/actions/schedule_spec.go +++ b/models/actions/schedule_spec.go @@ -10,6 +10,7 @@ import ( "forgejo.org/models/db" repo_model "forgejo.org/models/repo" + "forgejo.org/modules/optional" "forgejo.org/modules/timeutil" "github.com/robfig/cron/v3" @@ -27,13 +28,28 @@ type ActionScheduleSpec struct { // started or this entry's schedule is unsatisfiable Next timeutil.TimeStamp `xorm:"index"` // Prev is the last time this job was run, or the zero time if never. - Prev timeutil.TimeStamp - Spec string + Prev timeutil.TimeStamp + Spec string + TimeZone optional.Option[string] Created timeutil.TimeStamp `xorm:"created"` Updated timeutil.TimeStamp `xorm:"updated"` } +func NewActionScheduleSpec(cron string, tz optional.Option[string], referenceTime time.Time) (*ActionScheduleSpec, error) { + spec := &ActionScheduleSpec{ + Spec: cron, + TimeZone: tz, + } + cronSchedule, err := spec.Parse() + if err != nil { + return nil, err + } + + spec.Next = timeutil.TimeStamp(cronSchedule.Next(referenceTime).Unix()) + return spec, nil +} + // Parse parses the spec and returns a cron.Schedule // Unlike the default cron parser, Parse uses UTC timezone as the default if none is specified. func (s *ActionScheduleSpec) Parse() (cron.Schedule, error) { @@ -43,19 +59,29 @@ func (s *ActionScheduleSpec) Parse() (cron.Schedule, error) { return nil, err } - // If the spec has specified a timezone, use it - if strings.HasPrefix(s.Spec, "TZ=") || strings.HasPrefix(s.Spec, "CRON_TZ=") { - return schedule, nil - } - specSchedule, ok := schedule.(*cron.SpecSchedule) // If it's not a spec schedule, like "@every 5m", timezone is not relevant if !ok { return schedule, nil } - // Set the timezone to UTC - specSchedule.Location = time.UTC + // If `timezone` is not defined in the workflow, but the spec includes a timezone, use it. 
+ if !s.TimeZone.Has() && (strings.HasPrefix(s.Spec, "TZ=") || strings.HasPrefix(s.Spec, "CRON_TZ=")) { + return schedule, nil + } + + var location *time.Location + if present, tz := s.TimeZone.Get(); present { + location, err = time.LoadLocation(tz) + if err != nil { + return nil, err + } + } else { + // UTC is the default time zone. + location = time.UTC + } + + specSchedule.Location = location return specSchedule, nil } diff --git a/models/actions/schedule_spec_test.go b/models/actions/schedule_spec_test.go index 0c26fce4b2..eb3a83d0a6 100644 --- a/models/actions/schedule_spec_test.go +++ b/models/actions/schedule_spec_test.go @@ -7,6 +7,8 @@ import ( "testing" "time" + "forgejo.org/modules/optional" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -21,50 +23,105 @@ func TestActionScheduleSpec_Parse(t *testing.T) { }() time.Local = tz - now, err := time.Parse(time.RFC3339, "2024-07-31T15:47:55+08:00") - require.NoError(t, err) - tests := []struct { - name string - spec string - want string - wantErr assert.ErrorAssertionFunc + name string + refTime time.Time + spec string + timeZone string + want string + wantErr assert.ErrorAssertionFunc }{ { name: "regular", + refTime: time.Date(2024, 7, 31, 15, 47, 55, 0, time.Local), spec: "0 10 * * *", want: "2024-07-31T10:00:00Z", wantErr: assert.NoError, }, { name: "invalid", + refTime: time.Date(2024, 7, 31, 15, 47, 55, 0, time.Local), spec: "0 10 * *", want: "", wantErr: assert.Error, }, { - name: "with timezone", + name: "with TZ in cron schedule", + refTime: time.Date(2024, 7, 31, 15, 47, 55, 0, time.Local), spec: "TZ=America/New_York 0 10 * * *", want: "2024-07-31T14:00:00Z", wantErr: assert.NoError, }, { - name: "timezone irrelevant", + name: "with CRON_TZ in cron schedule", + refTime: time.Date(2024, 7, 31, 15, 47, 55, 0, time.Local), + spec: "CRON_TZ=America/New_York 0 10 * * *", + want: "2024-07-31T14:00:00Z", + wantErr: assert.NoError, + }, + { + name: "with separate time zone", 
+ refTime: time.Date(2024, 7, 31, 15, 47, 55, 0, time.Local), + spec: "0 10 * * *", + timeZone: "America/New_York", + want: "2024-07-31T14:00:00Z", + wantErr: assert.NoError, + }, + { + name: "separate time zone takes precedence over inlined time zone", + refTime: time.Date(2024, 7, 31, 15, 47, 55, 0, time.Local), + spec: "CRON_TZ=Europe/Berlin 0 10 * * *", + timeZone: "America/New_York", + want: "2024-07-31T14:00:00Z", + wantErr: assert.NoError, + }, + { + name: "time zone irrelevant", + refTime: time.Date(2024, 7, 31, 15, 47, 55, 0, time.Local), spec: "@every 5m", want: "2024-07-31T07:52:55Z", wantErr: assert.NoError, }, + { + // The various cron implementations handle the DST jump forwards differently. The most popular approaches + // are (a) scheduling all jobs at 3 o'clock that were supposed to run between 2 and 3 o'clock, or (b) + // skipping the execution on that day because any time between 2 and 3 o'clock never happened. Forgejo uses + // option B because the code it inherited already did that and was exposed to users. + name: "skips execution during DST jump forwards", + refTime: time.Date(2025, 3, 30, 1, 5, 0, 0, time.UTC), + spec: "10 2 * * *", // The clock jumps at 2 o'clock to 3 o'clock. + timeZone: "Europe/Berlin", + want: "2025-03-31T00:10:00Z", + wantErr: assert.NoError, + }, + { + name: "executes a first time before DST jump backwards", + refTime: time.Date(2025, 10, 26, 0, 5, 0, 0, time.UTC), + spec: "10 2 * * *", // The clock jumps at 3 o'clock to 2 o'clock. + timeZone: "Europe/Berlin", + want: "2025-10-26T00:10:00Z", + wantErr: assert.NoError, + }, + { + name: "executes a second time after DST jump backwards", + refTime: time.Date(2025, 10, 26, 1, 5, 0, 0, time.UTC), + spec: "10 2 * * *", // The clock jumps at 3 o'clock to 2 o'clock. 
+ timeZone: "Europe/Berlin", + want: "2025-10-26T01:10:00Z", + wantErr: assert.NoError, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { s := &ActionScheduleSpec{ - Spec: tt.spec, + Spec: tt.spec, + TimeZone: optional.FromNonDefault(tt.timeZone), } got, err := s.Parse() tt.wantErr(t, err) if err == nil { - assert.Equal(t, tt.want, got.Next(now).UTC().Format(time.RFC3339)) + assert.Equal(t, tt.want, got.Next(tt.refTime).UTC().Format(time.RFC3339)) } }) } diff --git a/models/actions/schedule_test.go b/models/actions/schedule_test.go new file mode 100644 index 0000000000..016185cb42 --- /dev/null +++ b/models/actions/schedule_test.go @@ -0,0 +1,102 @@ +// Copyright 2026 The Forgejo Authors. All rights reserved. +// SPDX-License-Identifier: GPL-3.0-or-later + +package actions + +import ( + "testing" + "time" + + "forgejo.org/models/db" + "forgejo.org/models/repo" + "forgejo.org/models/unittest" + "forgejo.org/models/user" + "forgejo.org/modules/optional" + "forgejo.org/modules/timeutil" + "forgejo.org/modules/webhook" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestScheduleCreateScheduleTask(t *testing.T) { + require.NoError(t, unittest.PrepareTestDatabase()) + + user2 := unittest.AssertExistsAndLoadBean(t, &user.User{ID: 2}) + repo62 := unittest.AssertExistsAndLoadBean(t, &repo.Repository{ID: 62, Name: "test_workflows", OwnerID: user2.ID}) + + content := ` +on: + push: + schedule: + - cron: "2 13 * * *" + - cron: "03 13 * * *" + timezone: Europe/Paris +jobs: + test: + runs-on: debian + steps: + - run: | + echo "OK" +` + + referenceTime := time.Date(2026, 3, 27, 17, 41, 21, 0, time.UTC) + + specWithoutTZ, err := NewActionScheduleSpec("2 13 * * *", optional.None[string](), referenceTime) + require.NoError(t, err) + + specWithTZ, err := NewActionScheduleSpec("3 13 * * *", optional.Some("Europe/Paris"), referenceTime) + require.NoError(t, err) + + schedule := &ActionSchedule{ + Title: 
".forgejo/workflows/test.yaml", + Specs: []*ActionScheduleSpec{specWithoutTZ, specWithTZ}, + RepoID: repo62.ID, + OwnerID: user2.ID, + WorkflowID: "test.yaml", + WorkflowDirectory: ".forgejo/workflows", + TriggerUserID: -2, + Ref: "main", + CommitSHA: "6af834a5bc97c1a337eb3a21d26903c5cdceca0c", + Event: webhook.HookEventPush, + EventPayload: "{\"action\":\"schedule\"}", + Content: []byte(content), + } + + err = CreateScheduleTask(t.Context(), []*ActionSchedule{schedule}) + require.NoError(t, err) + + schedules, err := db.Find[ActionSchedule](t.Context(), FindScheduleOptions{OwnerID: user2.ID, RepoID: repo62.ID}) + require.NoError(t, err) + require.Len(t, schedules, 1) + + assert.NotZero(t, schedules[0].ID) + assert.Equal(t, ".forgejo/workflows/test.yaml", schedules[0].Title) + assert.Equal(t, "test.yaml", schedules[0].WorkflowID) + assert.Equal(t, ".forgejo/workflows", schedules[0].WorkflowDirectory) + assert.Equal(t, int64(-2), schedules[0].TriggerUserID) + assert.Equal(t, "main", schedules[0].Ref) + assert.Equal(t, "6af834a5bc97c1a337eb3a21d26903c5cdceca0c", schedules[0].CommitSHA) + assert.Equal(t, webhook.HookEventPush, schedules[0].Event) + assert.JSONEq(t, "{\"action\":\"schedule\"}", schedules[0].EventPayload) + assert.Equal(t, []byte(content), schedules[0].Content) + + specs, total, err := FindSpecs(t.Context(), FindSpecOptions{RepoID: repo62.ID}) + require.NoError(t, err) + + assert.Equal(t, int64(2), total) + + assert.NotZero(t, specs[0].ID) + assert.Equal(t, schedules[0].ID, specs[0].ScheduleID) + assert.Equal(t, timeutil.TimeStamp(1774699380), specs[0].Next) + assert.Equal(t, "3 13 * * *", specs[0].Spec) + assert.Equal(t, optional.Some("Europe/Paris"), specs[0].TimeZone) + assert.Zero(t, specs[0].Prev) + + assert.NotZero(t, specs[1].ID) + assert.Equal(t, schedules[0].ID, specs[1].ScheduleID) + assert.Equal(t, timeutil.TimeStamp(1774702920), specs[1].Next) + assert.Equal(t, "2 13 * * *", specs[1].Spec) + assert.Equal(t, optional.None[string](), 
specs[1].TimeZone) + assert.Zero(t, specs[1].Prev) +} diff --git a/models/forgejo_migrations/v15c_add_schedule_spec_time_zones.go b/models/forgejo_migrations/v15c_add_schedule_spec_time_zones.go new file mode 100644 index 0000000000..d72d585725 --- /dev/null +++ b/models/forgejo_migrations/v15c_add_schedule_spec_time_zones.go @@ -0,0 +1,31 @@ +// Copyright 2026 The Forgejo Authors. All rights reserved. +// SPDX-License-Identifier: GPL-3.0-or-later + +package forgejo_migrations + +import ( + "forgejo.org/modules/optional" + + "xorm.io/xorm" +) + +func init() { + registerMigration(&Migration{ + Description: "add time zone support to action_schedule_spec", + Upgrade: addActionScheduleSpecTimeZone, + }) +} + +func addActionScheduleSpecTimeZone(x *xorm.Engine) error { + type ActionScheduleSpec struct { + TimeZone optional.Option[string] + } + + _, err := x.SyncWithOptions(xorm.SyncOptions{IgnoreDropIndices: true}, new(ActionScheduleSpec)) + if err != nil { + return err + } + + _, err = x.Exec("ALTER TABLE action_schedule DROP COLUMN `specs`") + return err +} diff --git a/services/actions/TestServiceActions_startTask/action_schedule.yml b/services/actions/TestServiceActions_startTask/action_schedule.yml index d0e7234475..8102e3f9e3 100644 --- a/services/actions/TestServiceActions_startTask/action_schedule.yml +++ b/services/actions/TestServiceActions_startTask/action_schedule.yml @@ -2,8 +2,6 @@ - id: 1 title: schedule_title1 - specs: - - '* * * * *' repo_id: 4 owner_id: 2 workflow_id: 'workflow1.yml' @@ -23,8 +21,6 @@ - id: 2 title: schedule_title2 - specs: - - '* * * * *' repo_id: 4 owner_id: 2 workflow_id: 'workflow2.yml' diff --git a/services/actions/notifier_helper.go b/services/actions/notifier_helper.go index c6af9b5b82..5e048a83ad 100644 --- a/services/actions/notifier_helper.go +++ b/services/actions/notifier_helper.go @@ -10,6 +10,7 @@ import ( "fmt" "slices" "strings" + "time" actions_model "forgejo.org/models/actions" "forgejo.org/models/db" @@ -24,6 +25,7 @@ 
import ( "forgejo.org/modules/gitrepo" "forgejo.org/modules/json" "forgejo.org/modules/log" + "forgejo.org/modules/optional" "forgejo.org/modules/setting" api "forgejo.org/modules/structs" "forgejo.org/modules/util" @@ -574,6 +576,16 @@ func handleSchedules( continue } + now := time.Now() + specs := make([]*actions_model.ActionScheduleSpec, 0, len(schedules)) + for _, schedule := range schedules { + scheduleSpec, err := actions_model.NewActionScheduleSpec(schedule.Cron, optional.FromNonDefault(schedule.TimeZone), now) + if err != nil { + return err + } + specs = append(specs, scheduleSpec) + } + title := workflow.Name if len(title) < 1 { title = dwf.GetWorkflowPath() @@ -590,7 +602,7 @@ func handleSchedules( CommitSHA: commit.ID.String(), Event: input.Event, EventPayload: string(p), - Specs: schedules, + Specs: specs, Content: dwf.Content, } crons = append(crons, run) diff --git a/services/actions/schedule_tasks_test.go b/services/actions/schedule_tasks_test.go index 9bf964fd90..57ee6b955a 100644 --- a/services/actions/schedule_tasks_test.go +++ b/services/actions/schedule_tasks_test.go @@ -6,12 +6,14 @@ package actions import ( "context" "testing" + "time" actions_model "forgejo.org/models/actions" "forgejo.org/models/db" repo_model "forgejo.org/models/repo" "forgejo.org/models/unit" "forgejo.org/models/unittest" + "forgejo.org/modules/optional" "forgejo.org/modules/test" "forgejo.org/modules/timeutil" webhook_module "forgejo.org/modules/webhook" @@ -29,6 +31,9 @@ func TestServiceActions_startTask(t *testing.T) { // Load fixtures that are corrupted and create one valid scheduled workflow repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4}) + spec, err := actions_model.NewActionScheduleSpec("* * * * *", optional.None[string](), time.Now()) + require.NoError(t, err) + workflowID := "some.yml" schedules := []*actions_model.ActionSchedule{ { @@ -42,7 +47,7 @@ func TestServiceActions_startTask(t *testing.T) { CommitSHA: "fakeSHA", Event: 
webhook_module.HookEventSchedule, EventPayload: "fakepayload", - Specs: []string{"* * * * *"}, + Specs: []*actions_model.ActionScheduleSpec{spec}, Content: []byte( ` jobs: @@ -57,7 +62,7 @@ jobs: require.Equal(t, 2, unittest.GetCount(t, actions_model.ActionScheduleSpec{})) require.NoError(t, actions_model.CreateScheduleTask(t.Context(), schedules)) require.Equal(t, 3, unittest.GetCount(t, actions_model.ActionScheduleSpec{})) - _, err := db.GetEngine(db.DefaultContext).Exec("UPDATE `action_schedule_spec` SET next = 1") + _, err = db.GetEngine(db.DefaultContext).Exec("UPDATE `action_schedule_spec` SET next = 1") require.NoError(t, err) // After running startTasks an ActionRun row is created for the valid scheduled workflow @@ -291,6 +296,9 @@ func TestServiceActions_DynamicMatrix(t *testing.T) { // Load fixtures that are corrupted and create one valid scheduled workflow repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4}) + spec, err := actions_model.NewActionScheduleSpec("* * * * *", optional.None[string](), time.Now()) + require.NoError(t, err) + workflowID := "some.yml" schedules := []*actions_model.ActionSchedule{ { @@ -304,7 +312,7 @@ func TestServiceActions_DynamicMatrix(t *testing.T) { CommitSHA: "fakeSHA", Event: webhook_module.HookEventSchedule, EventPayload: "fakepayload", - Specs: []string{"* * * * *"}, + Specs: []*actions_model.ActionScheduleSpec{spec}, Content: []byte( ` jobs: @@ -322,7 +330,7 @@ jobs: require.Equal(t, 2, unittest.GetCount(t, actions_model.ActionScheduleSpec{})) require.NoError(t, actions_model.CreateScheduleTask(t.Context(), schedules)) require.Equal(t, 3, unittest.GetCount(t, actions_model.ActionScheduleSpec{})) - _, err := db.GetEngine(db.DefaultContext).Exec("UPDATE `action_schedule_spec` SET next = 1") + _, err = db.GetEngine(db.DefaultContext).Exec("UPDATE `action_schedule_spec` SET next = 1") require.NoError(t, err) // After running startTasks an ActionRun row is created for the valid scheduled workflow @@ 
-354,6 +362,9 @@ func TestServiceActions_RunsOnNeeds(t *testing.T) { // Load fixtures that are corrupted and create one valid scheduled workflow repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4}) + spec, err := actions_model.NewActionScheduleSpec("* * * * *", optional.None[string](), time.Now()) + require.NoError(t, err) + workflowID := "some.yml" schedules := []*actions_model.ActionSchedule{ { @@ -366,7 +377,7 @@ func TestServiceActions_RunsOnNeeds(t *testing.T) { CommitSHA: "fakeSHA", Event: webhook_module.HookEventSchedule, EventPayload: "fakepayload", - Specs: []string{"* * * * *"}, + Specs: []*actions_model.ActionScheduleSpec{spec}, Content: []byte( ` jobs: @@ -381,7 +392,7 @@ jobs: require.Equal(t, 2, unittest.GetCount(t, actions_model.ActionScheduleSpec{})) require.NoError(t, actions_model.CreateScheduleTask(t.Context(), schedules)) require.Equal(t, 3, unittest.GetCount(t, actions_model.ActionScheduleSpec{})) - _, err := db.GetEngine(db.DefaultContext).Exec("UPDATE `action_schedule_spec` SET next = 1") + _, err = db.GetEngine(db.DefaultContext).Exec("UPDATE `action_schedule_spec` SET next = 1") require.NoError(t, err) // After running startTasks an ActionRun row is created for the valid scheduled workflow @@ -440,6 +451,9 @@ func TestServiceActions_ExpandReusableWorkflow(t *testing.T) { // Load fixtures that are corrupted and create one valid scheduled workflow repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4}) + spec, err := actions_model.NewActionScheduleSpec("* * * * *", optional.None[string](), time.Now()) + require.NoError(t, err) + workflowID := "some.yml" schedules := []*actions_model.ActionSchedule{ { @@ -452,7 +466,7 @@ func TestServiceActions_ExpandReusableWorkflow(t *testing.T) { CommitSHA: "fakeSHA", Event: webhook_module.HookEventSchedule, EventPayload: "fakepayload", - Specs: []string{"* * * * *"}, + Specs: []*actions_model.ActionScheduleSpec{spec}, Content: []byte( ` jobs: @@ -467,7 +481,7 @@ 
jobs: require.Equal(t, 2, unittest.GetCount(t, actions_model.ActionScheduleSpec{})) require.NoError(t, actions_model.CreateScheduleTask(t.Context(), schedules)) require.Equal(t, 3, unittest.GetCount(t, actions_model.ActionScheduleSpec{})) - _, err := db.GetEngine(db.DefaultContext).Exec("UPDATE `action_schedule_spec` SET next = 1") + _, err = db.GetEngine(db.DefaultContext).Exec("UPDATE `action_schedule_spec` SET next = 1") require.NoError(t, err) // After running startTasks an ActionRun row is created for the valid scheduled workflow diff --git a/tests/integration/actions_trigger_test.go b/tests/integration/actions_trigger_test.go index 4de8d625ab..914acadf14 100644 --- a/tests/integration/actions_trigger_test.go +++ b/tests/integration/actions_trigger_test.go @@ -7,6 +7,7 @@ import ( "fmt" "net/http" "net/url" + "slices" "strings" "testing" "time" @@ -23,6 +24,7 @@ import ( actions_module "forgejo.org/modules/actions" "forgejo.org/modules/git" "forgejo.org/modules/gitrepo" + "forgejo.org/modules/optional" "forgejo.org/modules/setting" api "forgejo.org/modules/structs" "forgejo.org/modules/test" @@ -1136,13 +1138,18 @@ func TestActionsWorkflowDispatchConcurrencyGroup(t *testing.T) { } func TestActionsScheduledWorkflow(t *testing.T) { + type expectedSpec struct { + cron string + timeZone optional.Option[string] + } + testCases := []struct { name string workflowID string workflowDirectory string workflowContent string expectedWorkflowTitle string - expectedCronSpecs []string + expectedCronSpecs []expectedSpec }{ { name: "GitHub", @@ -1158,7 +1165,7 @@ jobs: - run: echo OK `, expectedWorkflowTitle: ".github/workflows/scheduled.yml", - expectedCronSpecs: []string{"30 5,17 * * *"}, + expectedCronSpecs: []expectedSpec{{cron: "30 5,17 * * *", timeZone: optional.None[string]()}}, }, { name: "Gitea", @@ -1175,7 +1182,28 @@ jobs: - run: echo OK `, expectedWorkflowTitle: "My scheduled workflow", - expectedCronSpecs: []string{"* * * * *"}, + expectedCronSpecs: 
[]expectedSpec{{cron: "* * * * *", timeZone: optional.None[string]()}}, + }, + { + name: "Forgejo with time zone", + workflowID: "tz.yml", + workflowDirectory: ".forgejo/workflows", + workflowContent: ` +on: + schedule: + - cron: "44 10 * * *" + - cron: "25 19 * * *" + timezone: Europe/Madrid +jobs: + test: + steps: + - run: echo OK +`, + expectedWorkflowTitle: ".forgejo/workflows/tz.yml", + expectedCronSpecs: []expectedSpec{ + {cron: "44 10 * * *", timeZone: optional.None[string]()}, + {cron: "25 19 * * *", timeZone: optional.Some("Europe/Madrid")}, + }, }, } onApplicationRun(t, func(t *testing.T, u *url.URL) { @@ -1201,7 +1229,6 @@ jobs: require.Len(t, schedules, 1) assert.Equal(t, testCase.expectedWorkflowTitle, schedules[0].Title) - assert.Equal(t, testCase.expectedCronSpecs, schedules[0].Specs) assert.Equal(t, repo.ID, schedules[0].RepoID) assert.Equal(t, repo.OwnerID, schedules[0].OwnerID) assert.Equal(t, testCase.workflowID, schedules[0].WorkflowID) @@ -1210,6 +1237,20 @@ jobs: assert.Equal(t, sha, schedules[0].CommitSHA) assert.Equal(t, webhook_module.HookEventPush, schedules[0].Event) assert.Equal(t, []byte(testCase.workflowContent), schedules[0].Content) + + specs, total, err := actions_model.FindSpecs(t.Context(), actions_model.FindSpecOptions{RepoID: repo.ID}) + require.NoError(t, err) + + assert.Equal(t, int64(len(testCase.expectedCronSpecs)), total) + + // The query to return cron specs orders by `id DESC`. 
+ slices.Reverse(testCase.expectedCronSpecs) + + for i, expected := range testCase.expectedCronSpecs { + assert.Equal(t, schedules[0].ID, specs[i].ScheduleID) + assert.Equal(t, expected.cron, specs[i].Spec) + assert.Equal(t, expected.timeZone, specs[i].TimeZone) + } }) } }) From 4ca6b703af64cc9bdb457e827a5d16f683c2b052 Mon Sep 17 00:00:00 2001 From: forgejo-backport-action Date: Sat, 4 Apr 2026 19:16:35 +0200 Subject: [PATCH 084/352] [v15.0/forgejo] feat: support `timezone` in scheduled workflows (#11986) **Backport:** https://codeberg.org/forgejo/forgejo/pulls/11851 GitHub recently added the ability to [specify a time zone for scheduled workflows](https://docs.github.com/en/actions/reference/workflows-and-actions/workflow-syntax#onschedule), thereby making it possible to run scheduled workflows at a certain local time, no matter whether daylight saving time (DST) is currently active or not. Example copied from GitHub's documentation: ```yaml on: schedule: - cron: '30 5 * * 1-5' timezone: "America/New_York" ``` The workflow would run at 05:30 each morning in the America/New_York timezone every Monday through Friday. `timezone` accepts IANA time zone names. If `timezone` is absent, `Etc/UTC` is used. GitHub runs workflows that were scheduled during DST jumps forward, for example, between 2 o'clock and 3 o'clock, directly after the clock jumped forward. In this case, that would be 3 o'clock. Forgejo already supports time zones by prepending cron schedules with `TZ=` or `CRON_TZ=`: ```yaml on: schedule: - cron: 'CRON_TZ=America/New_York 30 5 * * 1-5' ``` However, that capability is not documented. Workflows that are scheduled to run during DST changes are skipped when the clock jumps forward and run twice when it jumps backward. This two-part PR adds support for `timezone` to improve compatibility with GitHub. `TZ` and `CRON_TZ` continue working. When both `timezone` and `TZ` or `CRON_TZ` are present, `timezone` takes precedence. 
When neither `timezone` nor `TZ` nor `CRON_TZ` are present, `Etc/UTC` is used as before. Because `TZ` and `CRON_TZ` were already supported by Forgejo before GitHub introduced `timezone`, `timezone` behaves during DST changes as previous versions of Forgejo, thereby deviating from GitHub. That means that workflows that are scheduled to run during DST changes are skipped when the clock jumps forward, and they run twice when it jumps backward. However, it is generally recommended not to schedule workflows during the time of day when DST changes occur. This part of the PR integrates the [workflow validation and parsing of the `timezone` field](https://code.forgejo.org/forgejo/runner/pulls/1454) supplied by Forgejo Runner. ## Checklist The [contributor guide](https://forgejo.org/docs/next/contributor/) contains information that will be helpful to first time contributors. All work and communication must conform to Forgejo's [AI Agreement](https://codeberg.org/forgejo/governance/src/branch/main/AIAgreement.md). There also are a few [conditions for merging Pull Requests in Forgejo repositories](https://codeberg.org/forgejo/governance/src/branch/main/PullRequestsAgreement.md). You are also welcome to join the [Forgejo development chatroom](https://matrix.to/#/#forgejo-development:matrix.org). ### Tests for Go changes (can be removed for JavaScript changes) - I added test coverage for Go changes... - [x] in their respective `*_test.go` for unit tests. - [x] in the `tests/integration` directory if it involves interactions with a live Forgejo server. - I ran... - [x] `make pr-go` before pushing ### Tests for JavaScript changes (can be removed for Go changes) - I added test coverage for JavaScript changes... - [ ] in `web_src/js/*.test.js` if it can be unit tested. 
- [ ] in `tests/e2e/*.test.e2e.js` if it requires interactions with a live Forgejo server (see also the [developer guide for JavaScript testing](https://codeberg.org/forgejo/forgejo/src/branch/forgejo/tests/e2e/README.md#end-to-end-tests)). ### Documentation - [x] I created a pull request [to the documentation](https://codeberg.org/forgejo/docs) to explain to Forgejo users how to use this change. - https://codeberg.org/forgejo/docs/pulls/1853 - [ ] I did not document these changes and I do not expect someone else to do it. ### Release notes - [x] This change will be noticed by a Forgejo user or admin (feature, bug fix, performance, etc.). I suggest to include a release note for this change. - [ ] This change is not visible to a Forgejo user or admin (refactor, dependency upgrade, etc.). I think there is no need to add a release note for this change. *The decision if the pull request will be shown in the release notes is up to the mergers / release team.* The content of the `release-notes/<pull request number>.md` file will serve as the basis for the release notes. If the file does not exist, the title of the pull request will be used instead. 
## Release notes - Features - [PR](https://codeberg.org/forgejo/forgejo/pulls/11851): support `timezone` in scheduled workflows Co-authored-by: Andreas Ahlenstorf Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/11986 Reviewed-by: Mathieu Fenniak Co-authored-by: forgejo-backport-action Co-committed-by: forgejo-backport-action --- go.mod | 2 +- go.sum | 4 +- models/actions/schedule.go | 22 +--- models/actions/schedule_spec.go | 44 ++++++-- models/actions/schedule_spec_test.go | 79 ++++++++++++-- models/actions/schedule_test.go | 102 ++++++++++++++++++ .../v15c_add_schedule_spec_time_zones.go | 31 ++++++ .../action_schedule.yml | 4 - services/actions/notifier_helper.go | 14 ++- services/actions/schedule_tasks_test.go | 30 ++++-- tests/integration/actions_trigger_test.go | 49 ++++++++- 11 files changed, 323 insertions(+), 58 deletions(-) create mode 100644 models/actions/schedule_test.go create mode 100644 models/forgejo_migrations/v15c_add_schedule_spec_time_zones.go diff --git a/go.mod b/go.mod index 9f751ccbb1..b37a7eb424 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( code.forgejo.org/forgejo/go-rpmutils v1.0.0 code.forgejo.org/forgejo/levelqueue v1.0.0 code.forgejo.org/forgejo/reply v1.0.2 - code.forgejo.org/forgejo/runner/v12 v12.7.3 + code.forgejo.org/forgejo/runner/v12 v12.8.0 code.forgejo.org/go-chi/binding v1.0.1 code.forgejo.org/go-chi/cache v1.0.1 code.forgejo.org/go-chi/captcha v1.0.2 diff --git a/go.sum b/go.sum index 3e4ee526b9..76e13223a8 100644 --- a/go.sum +++ b/go.sum @@ -30,8 +30,8 @@ code.forgejo.org/forgejo/levelqueue v1.0.0 h1:9krYpU6BM+j/1Ntj6m+VCAIu0UNnne1/Uf code.forgejo.org/forgejo/levelqueue v1.0.0/go.mod h1:fmG6zhVuqim2rxSFOoasgXO8V2W/k9U31VVYqLIRLhQ= code.forgejo.org/forgejo/reply v1.0.2 h1:dMhQCHV6/O3L5CLWNTol+dNzDAuyCK88z4J/lCdgFuQ= code.forgejo.org/forgejo/reply v1.0.2/go.mod h1:RyZUfzQLc+fuLIGjTSQWDAJWPiL4WtKXB/FifT5fM7U= -code.forgejo.org/forgejo/runner/v12 v12.7.3 h1:+thSawVfLeAZaWB6sYeUPvLj4lxYjCIDt/ktvkfX5Rs= 
-code.forgejo.org/forgejo/runner/v12 v12.7.3/go.mod h1:OO+Vy9Dww6WNV7GG/6VUWo/0WwXY+ASGlINmAfEA9Ws= +code.forgejo.org/forgejo/runner/v12 v12.8.0 h1:/MqOseYbsGaQ2qzepaZr3VyuqpESvSP/ZnC2aKfmU3g= +code.forgejo.org/forgejo/runner/v12 v12.8.0/go.mod h1:sgDAYfO4NJI1kUzGuD7klHuoFLQzWmZPw0erg7QlbJU= code.forgejo.org/forgejo/ssh v0.0.0-20241211213324-5fc306ca0616 h1:kEZL84+02jY9RxXM4zHBWZ3Fml0B09cmP1LGkDsCfIA= code.forgejo.org/forgejo/ssh v0.0.0-20241211213324-5fc306ca0616/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= code.forgejo.org/go-chi/binding v1.0.1 h1:coKNI+X1NzRN7X85LlrpvBRqk0TXpJ+ja28vusQWEuY= diff --git a/models/actions/schedule.go b/models/actions/schedule.go index 05c9f15d38..8c410b9d38 100644 --- a/models/actions/schedule.go +++ b/models/actions/schedule.go @@ -5,7 +5,6 @@ package actions import ( "context" - "time" "forgejo.org/models/db" repo_model "forgejo.org/models/repo" @@ -21,7 +20,7 @@ import ( type ActionSchedule struct { ID int64 Title string - Specs []string + Specs []*ActionScheduleSpec `xorm:"-"` RepoID int64 `xorm:"index"` Repo *repo_model.Repository `xorm:"-"` OwnerID int64 `xorm:"index"` @@ -73,25 +72,12 @@ func CreateScheduleTask(ctx context.Context, rows []*ActionSchedule) error { return err } - // Loop through each schedule spec and create a new spec row - now := time.Now() - for _, spec := range row.Specs { - specRow := &ActionScheduleSpec{ - RepoID: row.RepoID, - ScheduleID: row.ID, - Spec: spec, - } - // Parse the spec and check for errors - schedule, err := specRow.Parse() - if err != nil { - continue // skip to the next spec if there's an error - } - - specRow.Next = timeutil.TimeStamp(schedule.Next(now).Unix()) + spec.ScheduleID = row.ID + spec.RepoID = row.RepoID // Insert the new schedule spec row - if err = db.Insert(ctx, specRow); err != nil { + if err = db.Insert(ctx, spec); err != nil { return err } } diff --git a/models/actions/schedule_spec.go b/models/actions/schedule_spec.go index 83bdceb850..bcaee8bd6f 100644 --- 
a/models/actions/schedule_spec.go +++ b/models/actions/schedule_spec.go @@ -10,6 +10,7 @@ import ( "forgejo.org/models/db" repo_model "forgejo.org/models/repo" + "forgejo.org/modules/optional" "forgejo.org/modules/timeutil" "github.com/robfig/cron/v3" @@ -27,13 +28,28 @@ type ActionScheduleSpec struct { // started or this entry's schedule is unsatisfiable Next timeutil.TimeStamp `xorm:"index"` // Prev is the last time this job was run, or the zero time if never. - Prev timeutil.TimeStamp - Spec string + Prev timeutil.TimeStamp + Spec string + TimeZone optional.Option[string] Created timeutil.TimeStamp `xorm:"created"` Updated timeutil.TimeStamp `xorm:"updated"` } +func NewActionScheduleSpec(cron string, tz optional.Option[string], referenceTime time.Time) (*ActionScheduleSpec, error) { + spec := &ActionScheduleSpec{ + Spec: cron, + TimeZone: tz, + } + cronSchedule, err := spec.Parse() + if err != nil { + return nil, err + } + + spec.Next = timeutil.TimeStamp(cronSchedule.Next(referenceTime).Unix()) + return spec, nil +} + // Parse parses the spec and returns a cron.Schedule // Unlike the default cron parser, Parse uses UTC timezone as the default if none is specified. func (s *ActionScheduleSpec) Parse() (cron.Schedule, error) { @@ -43,19 +59,29 @@ func (s *ActionScheduleSpec) Parse() (cron.Schedule, error) { return nil, err } - // If the spec has specified a timezone, use it - if strings.HasPrefix(s.Spec, "TZ=") || strings.HasPrefix(s.Spec, "CRON_TZ=") { - return schedule, nil - } - specSchedule, ok := schedule.(*cron.SpecSchedule) // If it's not a spec schedule, like "@every 5m", timezone is not relevant if !ok { return schedule, nil } - // Set the timezone to UTC - specSchedule.Location = time.UTC + // If `timezone` is not defined in the workflow, but the spec includes a timezone, use it. 
+ if !s.TimeZone.Has() && (strings.HasPrefix(s.Spec, "TZ=") || strings.HasPrefix(s.Spec, "CRON_TZ=")) { + return schedule, nil + } + + var location *time.Location + if present, tz := s.TimeZone.Get(); present { + location, err = time.LoadLocation(tz) + if err != nil { + return nil, err + } + } else { + // UTC is the default time zone. + location = time.UTC + } + + specSchedule.Location = location return specSchedule, nil } diff --git a/models/actions/schedule_spec_test.go b/models/actions/schedule_spec_test.go index 0c26fce4b2..eb3a83d0a6 100644 --- a/models/actions/schedule_spec_test.go +++ b/models/actions/schedule_spec_test.go @@ -7,6 +7,8 @@ import ( "testing" "time" + "forgejo.org/modules/optional" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -21,50 +23,105 @@ func TestActionScheduleSpec_Parse(t *testing.T) { }() time.Local = tz - now, err := time.Parse(time.RFC3339, "2024-07-31T15:47:55+08:00") - require.NoError(t, err) - tests := []struct { - name string - spec string - want string - wantErr assert.ErrorAssertionFunc + name string + refTime time.Time + spec string + timeZone string + want string + wantErr assert.ErrorAssertionFunc }{ { name: "regular", + refTime: time.Date(2024, 7, 31, 15, 47, 55, 0, time.Local), spec: "0 10 * * *", want: "2024-07-31T10:00:00Z", wantErr: assert.NoError, }, { name: "invalid", + refTime: time.Date(2024, 7, 31, 15, 47, 55, 0, time.Local), spec: "0 10 * *", want: "", wantErr: assert.Error, }, { - name: "with timezone", + name: "with TZ in cron schedule", + refTime: time.Date(2024, 7, 31, 15, 47, 55, 0, time.Local), spec: "TZ=America/New_York 0 10 * * *", want: "2024-07-31T14:00:00Z", wantErr: assert.NoError, }, { - name: "timezone irrelevant", + name: "with CRON_TZ in cron schedule", + refTime: time.Date(2024, 7, 31, 15, 47, 55, 0, time.Local), + spec: "CRON_TZ=America/New_York 0 10 * * *", + want: "2024-07-31T14:00:00Z", + wantErr: assert.NoError, + }, + { + name: "with separate time zone", 
+ refTime: time.Date(2024, 7, 31, 15, 47, 55, 0, time.Local), + spec: "0 10 * * *", + timeZone: "America/New_York", + want: "2024-07-31T14:00:00Z", + wantErr: assert.NoError, + }, + { + name: "separate time zone takes precedence over inlined time zone", + refTime: time.Date(2024, 7, 31, 15, 47, 55, 0, time.Local), + spec: "CRON_TZ=Europe/Berlin 0 10 * * *", + timeZone: "America/New_York", + want: "2024-07-31T14:00:00Z", + wantErr: assert.NoError, + }, + { + name: "time zone irrelevant", + refTime: time.Date(2024, 7, 31, 15, 47, 55, 0, time.Local), spec: "@every 5m", want: "2024-07-31T07:52:55Z", wantErr: assert.NoError, }, + { + // The various cron implementations handle the DST jump forwards differently. The most popular approaches + // are (a) scheduling all jobs at 3 o'clock that were supposed to run between 2 and 3 o'clock, or (b) + // skipping the execution on that day because any time between 2 and 3 o'clock never happened. Forgejo uses + // option B because the code it inherited already did that and was exposed to users. + name: "skips execution during DST jump forwards", + refTime: time.Date(2025, 3, 30, 1, 5, 0, 0, time.UTC), + spec: "10 2 * * *", // The clock jumps at 2 o'clock to 3 o'clock. + timeZone: "Europe/Berlin", + want: "2025-03-31T00:10:00Z", + wantErr: assert.NoError, + }, + { + name: "executes a first time before DST jump backwards", + refTime: time.Date(2025, 10, 26, 0, 5, 0, 0, time.UTC), + spec: "10 2 * * *", // The clock jumps at 3 o'clock to 2 o'clock. + timeZone: "Europe/Berlin", + want: "2025-10-26T00:10:00Z", + wantErr: assert.NoError, + }, + { + name: "executes a second time after DST jump backwards", + refTime: time.Date(2025, 10, 26, 1, 5, 0, 0, time.UTC), + spec: "10 2 * * *", // The clock jumps at 3 o'clock to 2 o'clock. 
+ timeZone: "Europe/Berlin", + want: "2025-10-26T01:10:00Z", + wantErr: assert.NoError, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { s := &ActionScheduleSpec{ - Spec: tt.spec, + Spec: tt.spec, + TimeZone: optional.FromNonDefault(tt.timeZone), } got, err := s.Parse() tt.wantErr(t, err) if err == nil { - assert.Equal(t, tt.want, got.Next(now).UTC().Format(time.RFC3339)) + assert.Equal(t, tt.want, got.Next(tt.refTime).UTC().Format(time.RFC3339)) } }) } diff --git a/models/actions/schedule_test.go b/models/actions/schedule_test.go new file mode 100644 index 0000000000..016185cb42 --- /dev/null +++ b/models/actions/schedule_test.go @@ -0,0 +1,102 @@ +// Copyright 2026 The Forgejo Authors. All rights reserved. +// SPDX-License-Identifier: GPL-3.0-or-later + +package actions + +import ( + "testing" + "time" + + "forgejo.org/models/db" + "forgejo.org/models/repo" + "forgejo.org/models/unittest" + "forgejo.org/models/user" + "forgejo.org/modules/optional" + "forgejo.org/modules/timeutil" + "forgejo.org/modules/webhook" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestScheduleCreateScheduleTask(t *testing.T) { + require.NoError(t, unittest.PrepareTestDatabase()) + + user2 := unittest.AssertExistsAndLoadBean(t, &user.User{ID: 2}) + repo62 := unittest.AssertExistsAndLoadBean(t, &repo.Repository{ID: 62, Name: "test_workflows", OwnerID: user2.ID}) + + content := ` +on: + push: + schedule: + - cron: "2 13 * * *" + - cron: "03 13 * * *" + timezone: Europe/Paris +jobs: + test: + runs-on: debian + steps: + - run: | + echo "OK" +` + + referenceTime := time.Date(2026, 3, 27, 17, 41, 21, 0, time.UTC) + + specWithoutTZ, err := NewActionScheduleSpec("2 13 * * *", optional.None[string](), referenceTime) + require.NoError(t, err) + + specWithTZ, err := NewActionScheduleSpec("3 13 * * *", optional.Some("Europe/Paris"), referenceTime) + require.NoError(t, err) + + schedule := &ActionSchedule{ + Title: 
".forgejo/workflows/test.yaml", + Specs: []*ActionScheduleSpec{specWithoutTZ, specWithTZ}, + RepoID: repo62.ID, + OwnerID: user2.ID, + WorkflowID: "test.yaml", + WorkflowDirectory: ".forgejo/workflows", + TriggerUserID: -2, + Ref: "main", + CommitSHA: "6af834a5bc97c1a337eb3a21d26903c5cdceca0c", + Event: webhook.HookEventPush, + EventPayload: "{\"action\":\"schedule\"}", + Content: []byte(content), + } + + err = CreateScheduleTask(t.Context(), []*ActionSchedule{schedule}) + require.NoError(t, err) + + schedules, err := db.Find[ActionSchedule](t.Context(), FindScheduleOptions{OwnerID: user2.ID, RepoID: repo62.ID}) + require.NoError(t, err) + require.Len(t, schedules, 1) + + assert.NotZero(t, schedules[0].ID) + assert.Equal(t, ".forgejo/workflows/test.yaml", schedules[0].Title) + assert.Equal(t, "test.yaml", schedules[0].WorkflowID) + assert.Equal(t, ".forgejo/workflows", schedules[0].WorkflowDirectory) + assert.Equal(t, int64(-2), schedules[0].TriggerUserID) + assert.Equal(t, "main", schedules[0].Ref) + assert.Equal(t, "6af834a5bc97c1a337eb3a21d26903c5cdceca0c", schedules[0].CommitSHA) + assert.Equal(t, webhook.HookEventPush, schedules[0].Event) + assert.JSONEq(t, "{\"action\":\"schedule\"}", schedules[0].EventPayload) + assert.Equal(t, []byte(content), schedules[0].Content) + + specs, total, err := FindSpecs(t.Context(), FindSpecOptions{RepoID: repo62.ID}) + require.NoError(t, err) + + assert.Equal(t, int64(2), total) + + assert.NotZero(t, specs[0].ID) + assert.Equal(t, schedules[0].ID, specs[0].ScheduleID) + assert.Equal(t, timeutil.TimeStamp(1774699380), specs[0].Next) + assert.Equal(t, "3 13 * * *", specs[0].Spec) + assert.Equal(t, optional.Some("Europe/Paris"), specs[0].TimeZone) + assert.Zero(t, specs[0].Prev) + + assert.NotZero(t, specs[1].ID) + assert.Equal(t, schedules[0].ID, specs[1].ScheduleID) + assert.Equal(t, timeutil.TimeStamp(1774702920), specs[1].Next) + assert.Equal(t, "2 13 * * *", specs[1].Spec) + assert.Equal(t, optional.None[string](), 
specs[1].TimeZone) + assert.Zero(t, specs[1].Prev) +} diff --git a/models/forgejo_migrations/v15c_add_schedule_spec_time_zones.go b/models/forgejo_migrations/v15c_add_schedule_spec_time_zones.go new file mode 100644 index 0000000000..d72d585725 --- /dev/null +++ b/models/forgejo_migrations/v15c_add_schedule_spec_time_zones.go @@ -0,0 +1,31 @@ +// Copyright 2026 The Forgejo Authors. All rights reserved. +// SPDX-License-Identifier: GPL-3.0-or-later + +package forgejo_migrations + +import ( + "forgejo.org/modules/optional" + + "xorm.io/xorm" +) + +func init() { + registerMigration(&Migration{ + Description: "add time zone support to action_schedule_spec", + Upgrade: addActionScheduleSpecTimeZone, + }) +} + +func addActionScheduleSpecTimeZone(x *xorm.Engine) error { + type ActionScheduleSpec struct { + TimeZone optional.Option[string] + } + + _, err := x.SyncWithOptions(xorm.SyncOptions{IgnoreDropIndices: true}, new(ActionScheduleSpec)) + if err != nil { + return err + } + + _, err = x.Exec("ALTER TABLE action_schedule DROP COLUMN `specs`") + return err +} diff --git a/services/actions/TestServiceActions_startTask/action_schedule.yml b/services/actions/TestServiceActions_startTask/action_schedule.yml index d0e7234475..8102e3f9e3 100644 --- a/services/actions/TestServiceActions_startTask/action_schedule.yml +++ b/services/actions/TestServiceActions_startTask/action_schedule.yml @@ -2,8 +2,6 @@ - id: 1 title: schedule_title1 - specs: - - '* * * * *' repo_id: 4 owner_id: 2 workflow_id: 'workflow1.yml' @@ -23,8 +21,6 @@ - id: 2 title: schedule_title2 - specs: - - '* * * * *' repo_id: 4 owner_id: 2 workflow_id: 'workflow2.yml' diff --git a/services/actions/notifier_helper.go b/services/actions/notifier_helper.go index c6af9b5b82..5e048a83ad 100644 --- a/services/actions/notifier_helper.go +++ b/services/actions/notifier_helper.go @@ -10,6 +10,7 @@ import ( "fmt" "slices" "strings" + "time" actions_model "forgejo.org/models/actions" "forgejo.org/models/db" @@ -24,6 +25,7 @@ 
import ( "forgejo.org/modules/gitrepo" "forgejo.org/modules/json" "forgejo.org/modules/log" + "forgejo.org/modules/optional" "forgejo.org/modules/setting" api "forgejo.org/modules/structs" "forgejo.org/modules/util" @@ -574,6 +576,16 @@ func handleSchedules( continue } + now := time.Now() + specs := make([]*actions_model.ActionScheduleSpec, 0, len(schedules)) + for _, schedule := range schedules { + scheduleSpec, err := actions_model.NewActionScheduleSpec(schedule.Cron, optional.FromNonDefault(schedule.TimeZone), now) + if err != nil { + return err + } + specs = append(specs, scheduleSpec) + } + title := workflow.Name if len(title) < 1 { title = dwf.GetWorkflowPath() @@ -590,7 +602,7 @@ func handleSchedules( CommitSHA: commit.ID.String(), Event: input.Event, EventPayload: string(p), - Specs: schedules, + Specs: specs, Content: dwf.Content, } crons = append(crons, run) diff --git a/services/actions/schedule_tasks_test.go b/services/actions/schedule_tasks_test.go index 9bf964fd90..57ee6b955a 100644 --- a/services/actions/schedule_tasks_test.go +++ b/services/actions/schedule_tasks_test.go @@ -6,12 +6,14 @@ package actions import ( "context" "testing" + "time" actions_model "forgejo.org/models/actions" "forgejo.org/models/db" repo_model "forgejo.org/models/repo" "forgejo.org/models/unit" "forgejo.org/models/unittest" + "forgejo.org/modules/optional" "forgejo.org/modules/test" "forgejo.org/modules/timeutil" webhook_module "forgejo.org/modules/webhook" @@ -29,6 +31,9 @@ func TestServiceActions_startTask(t *testing.T) { // Load fixtures that are corrupted and create one valid scheduled workflow repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4}) + spec, err := actions_model.NewActionScheduleSpec("* * * * *", optional.None[string](), time.Now()) + require.NoError(t, err) + workflowID := "some.yml" schedules := []*actions_model.ActionSchedule{ { @@ -42,7 +47,7 @@ func TestServiceActions_startTask(t *testing.T) { CommitSHA: "fakeSHA", Event: 
webhook_module.HookEventSchedule, EventPayload: "fakepayload", - Specs: []string{"* * * * *"}, + Specs: []*actions_model.ActionScheduleSpec{spec}, Content: []byte( ` jobs: @@ -57,7 +62,7 @@ jobs: require.Equal(t, 2, unittest.GetCount(t, actions_model.ActionScheduleSpec{})) require.NoError(t, actions_model.CreateScheduleTask(t.Context(), schedules)) require.Equal(t, 3, unittest.GetCount(t, actions_model.ActionScheduleSpec{})) - _, err := db.GetEngine(db.DefaultContext).Exec("UPDATE `action_schedule_spec` SET next = 1") + _, err = db.GetEngine(db.DefaultContext).Exec("UPDATE `action_schedule_spec` SET next = 1") require.NoError(t, err) // After running startTasks an ActionRun row is created for the valid scheduled workflow @@ -291,6 +296,9 @@ func TestServiceActions_DynamicMatrix(t *testing.T) { // Load fixtures that are corrupted and create one valid scheduled workflow repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4}) + spec, err := actions_model.NewActionScheduleSpec("* * * * *", optional.None[string](), time.Now()) + require.NoError(t, err) + workflowID := "some.yml" schedules := []*actions_model.ActionSchedule{ { @@ -304,7 +312,7 @@ func TestServiceActions_DynamicMatrix(t *testing.T) { CommitSHA: "fakeSHA", Event: webhook_module.HookEventSchedule, EventPayload: "fakepayload", - Specs: []string{"* * * * *"}, + Specs: []*actions_model.ActionScheduleSpec{spec}, Content: []byte( ` jobs: @@ -322,7 +330,7 @@ jobs: require.Equal(t, 2, unittest.GetCount(t, actions_model.ActionScheduleSpec{})) require.NoError(t, actions_model.CreateScheduleTask(t.Context(), schedules)) require.Equal(t, 3, unittest.GetCount(t, actions_model.ActionScheduleSpec{})) - _, err := db.GetEngine(db.DefaultContext).Exec("UPDATE `action_schedule_spec` SET next = 1") + _, err = db.GetEngine(db.DefaultContext).Exec("UPDATE `action_schedule_spec` SET next = 1") require.NoError(t, err) // After running startTasks an ActionRun row is created for the valid scheduled workflow @@ 
-354,6 +362,9 @@ func TestServiceActions_RunsOnNeeds(t *testing.T) { // Load fixtures that are corrupted and create one valid scheduled workflow repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4}) + spec, err := actions_model.NewActionScheduleSpec("* * * * *", optional.None[string](), time.Now()) + require.NoError(t, err) + workflowID := "some.yml" schedules := []*actions_model.ActionSchedule{ { @@ -366,7 +377,7 @@ func TestServiceActions_RunsOnNeeds(t *testing.T) { CommitSHA: "fakeSHA", Event: webhook_module.HookEventSchedule, EventPayload: "fakepayload", - Specs: []string{"* * * * *"}, + Specs: []*actions_model.ActionScheduleSpec{spec}, Content: []byte( ` jobs: @@ -381,7 +392,7 @@ jobs: require.Equal(t, 2, unittest.GetCount(t, actions_model.ActionScheduleSpec{})) require.NoError(t, actions_model.CreateScheduleTask(t.Context(), schedules)) require.Equal(t, 3, unittest.GetCount(t, actions_model.ActionScheduleSpec{})) - _, err := db.GetEngine(db.DefaultContext).Exec("UPDATE `action_schedule_spec` SET next = 1") + _, err = db.GetEngine(db.DefaultContext).Exec("UPDATE `action_schedule_spec` SET next = 1") require.NoError(t, err) // After running startTasks an ActionRun row is created for the valid scheduled workflow @@ -440,6 +451,9 @@ func TestServiceActions_ExpandReusableWorkflow(t *testing.T) { // Load fixtures that are corrupted and create one valid scheduled workflow repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4}) + spec, err := actions_model.NewActionScheduleSpec("* * * * *", optional.None[string](), time.Now()) + require.NoError(t, err) + workflowID := "some.yml" schedules := []*actions_model.ActionSchedule{ { @@ -452,7 +466,7 @@ func TestServiceActions_ExpandReusableWorkflow(t *testing.T) { CommitSHA: "fakeSHA", Event: webhook_module.HookEventSchedule, EventPayload: "fakepayload", - Specs: []string{"* * * * *"}, + Specs: []*actions_model.ActionScheduleSpec{spec}, Content: []byte( ` jobs: @@ -467,7 +481,7 @@ 
jobs: require.Equal(t, 2, unittest.GetCount(t, actions_model.ActionScheduleSpec{})) require.NoError(t, actions_model.CreateScheduleTask(t.Context(), schedules)) require.Equal(t, 3, unittest.GetCount(t, actions_model.ActionScheduleSpec{})) - _, err := db.GetEngine(db.DefaultContext).Exec("UPDATE `action_schedule_spec` SET next = 1") + _, err = db.GetEngine(db.DefaultContext).Exec("UPDATE `action_schedule_spec` SET next = 1") require.NoError(t, err) // After running startTasks an ActionRun row is created for the valid scheduled workflow diff --git a/tests/integration/actions_trigger_test.go b/tests/integration/actions_trigger_test.go index 4de8d625ab..914acadf14 100644 --- a/tests/integration/actions_trigger_test.go +++ b/tests/integration/actions_trigger_test.go @@ -7,6 +7,7 @@ import ( "fmt" "net/http" "net/url" + "slices" "strings" "testing" "time" @@ -23,6 +24,7 @@ import ( actions_module "forgejo.org/modules/actions" "forgejo.org/modules/git" "forgejo.org/modules/gitrepo" + "forgejo.org/modules/optional" "forgejo.org/modules/setting" api "forgejo.org/modules/structs" "forgejo.org/modules/test" @@ -1136,13 +1138,18 @@ func TestActionsWorkflowDispatchConcurrencyGroup(t *testing.T) { } func TestActionsScheduledWorkflow(t *testing.T) { + type expectedSpec struct { + cron string + timeZone optional.Option[string] + } + testCases := []struct { name string workflowID string workflowDirectory string workflowContent string expectedWorkflowTitle string - expectedCronSpecs []string + expectedCronSpecs []expectedSpec }{ { name: "GitHub", @@ -1158,7 +1165,7 @@ jobs: - run: echo OK `, expectedWorkflowTitle: ".github/workflows/scheduled.yml", - expectedCronSpecs: []string{"30 5,17 * * *"}, + expectedCronSpecs: []expectedSpec{{cron: "30 5,17 * * *", timeZone: optional.None[string]()}}, }, { name: "Gitea", @@ -1175,7 +1182,28 @@ jobs: - run: echo OK `, expectedWorkflowTitle: "My scheduled workflow", - expectedCronSpecs: []string{"* * * * *"}, + expectedCronSpecs: 
[]expectedSpec{{cron: "* * * * *", timeZone: optional.None[string]()}}, + }, + { + name: "Forgejo with time zone", + workflowID: "tz.yml", + workflowDirectory: ".forgejo/workflows", + workflowContent: ` +on: + schedule: + - cron: "44 10 * * *" + - cron: "25 19 * * *" + timezone: Europe/Madrid +jobs: + test: + steps: + - run: echo OK +`, + expectedWorkflowTitle: ".forgejo/workflows/tz.yml", + expectedCronSpecs: []expectedSpec{ + {cron: "44 10 * * *", timeZone: optional.None[string]()}, + {cron: "25 19 * * *", timeZone: optional.Some("Europe/Madrid")}, + }, }, } onApplicationRun(t, func(t *testing.T, u *url.URL) { @@ -1201,7 +1229,6 @@ jobs: require.Len(t, schedules, 1) assert.Equal(t, testCase.expectedWorkflowTitle, schedules[0].Title) - assert.Equal(t, testCase.expectedCronSpecs, schedules[0].Specs) assert.Equal(t, repo.ID, schedules[0].RepoID) assert.Equal(t, repo.OwnerID, schedules[0].OwnerID) assert.Equal(t, testCase.workflowID, schedules[0].WorkflowID) @@ -1210,6 +1237,20 @@ jobs: assert.Equal(t, sha, schedules[0].CommitSHA) assert.Equal(t, webhook_module.HookEventPush, schedules[0].Event) assert.Equal(t, []byte(testCase.workflowContent), schedules[0].Content) + + specs, total, err := actions_model.FindSpecs(t.Context(), actions_model.FindSpecOptions{RepoID: repo.ID}) + require.NoError(t, err) + + assert.Equal(t, int64(len(testCase.expectedCronSpecs)), total) + + // The query to return cron specs orders by `id DESC`. 
+ slices.Reverse(testCase.expectedCronSpecs) + + for i, expected := range testCase.expectedCronSpecs { + assert.Equal(t, schedules[0].ID, specs[i].ScheduleID) + assert.Equal(t, expected.cron, specs[i].Spec) + assert.Equal(t, expected.timeZone, specs[i].TimeZone) + } }) } }) From 80d840c1284e4f44b9efac208811b9ed26455ade Mon Sep 17 00:00:00 2001 From: grangelouis Date: Sat, 4 Apr 2026 21:58:16 +0200 Subject: [PATCH 085/352] fix: missing syntax dialog rounded corners (#11945) Fixes #11299 Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/11945 Reviewed-by: Beowulf Co-authored-by: grangelouis Co-committed-by: grangelouis --- web_src/css/modules/dialog.css | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/web_src/css/modules/dialog.css b/web_src/css/modules/dialog.css index 897eb838cf..3f9a1a44bf 100644 --- a/web_src/css/modules/dialog.css +++ b/web_src/css/modules/dialog.css @@ -88,6 +88,11 @@ dialog .actions { padding: 1rem; } +dialog footer { + border-bottom-left-radius: var(--border-radius); + border-bottom-right-radius: var(--border-radius); +} + /* positive/negative action buttons */ dialog .actions .ui.button { display: inline-flex; From 90ca6116954c0471280c6576e36c67577ff22c88 Mon Sep 17 00:00:00 2001 From: Renovate Bot Date: Sun, 5 Apr 2026 02:15:52 +0200 Subject: [PATCH 086/352] Update dependency mermaid to v11.14.0 (forgejo) (#11990) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Change | [Age](https://docs.renovatebot.com/merge-confidence/) | [Confidence](https://docs.renovatebot.com/merge-confidence/) | |---|---|---|---| | [mermaid](https://github.com/mermaid-js/mermaid) | [`11.13.0` → `11.14.0`](https://renovatebot.com/diffs/npm/mermaid/11.13.0/11.14.0) | ![age](https://developer.mend.io/api/mc/badges/age/npm/mermaid/11.14.0?slim=true) | ![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/mermaid/11.13.0/11.14.0?slim=true) | --- ### 
Release Notes
mermaid-js/mermaid (mermaid) ### [`v11.14.0`](https://github.com/mermaid-js/mermaid/releases/tag/mermaid%4011.14.0) [Compare Source](https://github.com/mermaid-js/mermaid/compare/mermaid@11.13.0...mermaid@11.14.0) Thanks to our awesome mermaid community that contributed to this release: [@​ashishjain0512](https://github.com/ashishjain0512), [@​tractorjuice](https://github.com/tractorjuice), [@​autofix-ci\[bot\]](https://github.com/autofix-ci%5Bbot%5D), [@​aloisklink](https://github.com/aloisklink), [@​knsv](https://github.com/knsv), [@​kibanana](https://github.com/kibanana), [@​chandershekhar22](https://github.com/chandershekhar22), [@​khalil](https://github.com/khalil), [@​ytatsuno](https://github.com/ytatsuno), [@​sidharthv96](https://github.com/sidharthv96), [@​github-actions\[bot\]](https://github.com/github-actions%5Bbot%5D), [@​dripcoding](https://github.com/dripcoding), [@​knsv-bot](https://github.com/knsv-bot), [@​jeroensmink98](https://github.com/jeroensmink98), [@​Alex9583](https://github.com/Alex9583), [@​GhassenS](https://github.com/GhassenS), [@​omkarht](https://github.com/omkarht), [@​darshanr0107](https://github.com/darshanr0107), [@​leentaylor](https://github.com/leentaylor), [@​lee-treehouse](https://github.com/lee-treehouse), [@​veeceey](https://github.com/veeceey), [@​turntrout](https://github.com/turntrout), [@​Mermaid-Chart](https://github.com/Mermaid-Chart), [@​BambioGaming](https://github.com/BambioGaming), Claude ### Releases #### [@​mermaid-js/examples](https://github.com/mermaid-js/examples)@​1.2.0 ##### Minor Changes - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - add new TreeView diagram #### mermaid\@​11.14.0 ##### Minor Changes - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - Add Wardley Maps diagram type (beta) 
Adds Wardley Maps as a new diagram type to Mermaid (available as `wardley-beta`). Wardley Maps are visual representations of business strategy that help map value chains and component evolution. Features: - Component positioning with \[visibility, evolution] coordinates (OWM format) - Anchors for users/customers - Multiple link types: dependencies, flows, labeled links - Evolution arrows and trend indicators - Custom evolution stages with optional dual labels - Custom stage widths using [@​boundary](https://github.com/boundary) notation - Pipeline components with visibility inheritance - Annotations, notes, and visual elements - Source strategy markers: build, buy, outsource, market - Inertia indicators - Theme integration Implementation includes parser, D3.js renderer, unit tests, E2E tests, and comprehensive documentation. - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: implement neo look styling for state diagrams - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: implement neo look support for sequence diagrams with drop shadows, and enhanced styling - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: add `randomize` config option for architecture diagrams, defaulting to `false` for deterministic layout - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: Add option to change timeline direction - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - Fix duplicate SVG element IDs when rendering multiple 
diagrams on the same page. Internal element IDs (nodes, edges, markers, clusters) are now prefixed with the diagram's SVG element ID across all diagram types. Custom CSS or JS using exact ID selectors like `#arrowhead` should use attribute-ending selectors like `[id$="-arrowhead"]` instead. - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: implement neo look styling for ER diagrams - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: implement neo look styling for requirement diagrams - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: add theme support for data label colour in xy chart - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: implement neo look styling for mindmap diagrams - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: implement neo look for mermaid flowchart diagrams - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: implement neo look and themes for class diagram - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: add showDataLabelOutsideBar option for xy chart - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: implement neo look support for 
timeline diagram with drop shadows, additoinal redux themes and enhanced styling - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: implement neo look and themes for gitGraph diagram - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - add new TreeView diagram ##### Patch Changes - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - add link to ishikawa diagram on mermaid.js.org - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - docs: document valid duration token formats in gantt.md - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - fix: ER diagram parsing when using "1" as entity identifier on right side The parser was incorrectly tokenizing the second "1" in patterns like `a many to 1 1:` because the lookahead rule only checked for alphabetic characters after whitespace, not digits. Added a new lookahead pattern `"1"(?=\s+[0-9])` to correctly identify the cardinality alias before a numeric entity name. 
Fixes [#​7472](https://github.com/mermaid-js/mermaid/issues/7472) - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - fix: scope cytoscape label style mapping to edges with labels to prevent console warnings - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - fix: support inline annotation syntax in class diagrams (class Shape <>) - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - fix: Align branch label background with text for multi-line labels in LR GitGraph layout - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - fix: preserve cause hierarchy when ishikawa effect is indented more than causes - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - refactor: remove unused createGraphWithElements function and add regression test for open edge arrowheads - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - fix: Prevent long pie chart titles from being clipped by expanding the viewBox - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - fix: prevent sequence diagram hang when "as" is used without a trailing space in participant declarations - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - fix: warn when `style` 
statement targets a non-existent node in flowcharts - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - fix: group state diagram SVG children under single root element - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - fix: Allow :::className syntax inside composite state blocks - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) Thanks [@​aloisklink](https://github.com/aloisklink), [@​BambioGaming](https://github.com/BambioGaming)! - fix: prevent escaping `<` and `&` when `htmlLabels: false` - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - fix: treemap title and labels use theme-aware colors for dark backgrounds - Updated dependencies \[[`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519)]: - [@​mermaid-js/parser](https://github.com/mermaid-js/parser)@​1.1.0 #### [@​mermaid-js/parser](https://github.com/mermaid-js/parser)@​1.1.0 ##### Minor Changes - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - add new TreeView diagram #### [@​mermaid-js/tiny](https://github.com/mermaid-js/tiny)@​11.14.0 ##### Minor Changes - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - Add Wardley Maps diagram type (beta) Adds Wardley Maps as a new diagram type to Mermaid (available as `wardley-beta`). 
Wardley Maps are visual representations of business strategy that help map value chains and component evolution. Features: - Component positioning with \[visibility, evolution] coordinates (OWM format) - Anchors for users/customers - Multiple link types: dependencies, flows, labeled links - Evolution arrows and trend indicators - Custom evolution stages with optional dual labels - Custom stage widths using [@​boundary](https://github.com/boundary) notation - Pipeline components with visibility inheritance - Annotations, notes, and visual elements - Source strategy markers: build, buy, outsource, market - Inertia indicators - Theme integration Implementation includes parser, D3.js renderer, unit tests, E2E tests, and comprehensive documentation. - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: implement neo look styling for state diagrams - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: implement neo look support for sequence diagrams with drop shadows, and enhanced styling - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: add `randomize` config option for architecture diagrams, defaulting to `false` for deterministic layout - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: Add option to change timeline direction - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - Fix duplicate SVG element IDs when rendering multiple diagrams on the same page. 
Internal element IDs (nodes, edges, markers, clusters) are now prefixed with the diagram's SVG element ID across all diagram types. Custom CSS or JS using exact ID selectors like `#arrowhead` should use attribute-ending selectors like `[id$="-arrowhead"]` instead. - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: implement neo look styling for ER diagrams - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: implement neo look styling for requirement diagrams - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: add theme support for data label colour in xy chart - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: implement neo look styling for mindmap diagrams - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: implement neo look for mermaid flowchart diagrams - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: implement neo look and themes for class diagram - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: add showDataLabelOutsideBar option for xy chart - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: implement neo look support for timeline diagram with drop 
shadows, additoinal redux themes and enhanced styling - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - feat: implement neo look and themes for gitGraph diagram - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - add new TreeView diagram ##### Patch Changes - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - add link to ishikawa diagram on mermaid.js.org - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - docs: document valid duration token formats in gantt.md - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - fix: ER diagram parsing when using "1" as entity identifier on right side The parser was incorrectly tokenizing the second "1" in patterns like `a many to 1 1:` because the lookahead rule only checked for alphabetic characters after whitespace, not digits. Added a new lookahead pattern `"1"(?=\s+[0-9])` to correctly identify the cardinality alias before a numeric entity name. 
Fixes [#​7472](https://github.com/mermaid-js/mermaid/issues/7472) - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - fix: scope cytoscape label style mapping to edges with labels to prevent console warnings - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - fix: support inline annotation syntax in class diagrams (class Shape <>) - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - fix: Align branch label background with text for multi-line labels in LR GitGraph layout - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - fix: preserve cause hierarchy when ishikawa effect is indented more than causes - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - refactor: remove unused createGraphWithElements function and add regression test for open edge arrowheads - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - fix: Prevent long pie chart titles from being clipped by expanding the viewBox - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - fix: prevent sequence diagram hang when "as" is used without a trailing space in participant declarations - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - fix: warn when `style` 
statement targets a non-existent node in flowcharts - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - fix: group state diagram SVG children under single root element - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - fix: Allow :::className syntax inside composite state blocks - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) Thanks [@​aloisklink](https://github.com/aloisklink), [@​BambioGaming](https://github.com/BambioGaming)! - fix: prevent escaping `<` and `&` when `htmlLabels: false` - [#​7526](https://github.com/mermaid-js/mermaid/pull/7526) [`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519) - fix: treemap title and labels use theme-aware colors for dark backgrounds - Updated dependencies \[[`efe218a`](https://github.com/mermaid-js/mermaid/commit/efe218a47fb5a4c2bd5489b48ce69213b141e519)]: - [@​mermaid-js/parser](https://github.com/mermaid-js/parser)@​1.1.0
--- ### Configuration 📅 **Schedule**: Branch creation - Between 12:00 AM and 03:59 AM ( * 0-3 * * * ) (UTC), Automerge - Between 12:00 AM and 03:59 AM ( * 0-3 * * * ) (UTC). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Renovate Bot](https://github.com/renovatebot/renovate). Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/11990 Reviewed-by: Mathieu Fenniak Co-authored-by: Renovate Bot Co-committed-by: Renovate Bot --- package-lock.json | 10 +++++----- package.json | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/package-lock.json b/package-lock.json index 96688e44ef..72aeb9821b 100644 --- a/package-lock.json +++ b/package-lock.json @@ -55,7 +55,7 @@ "idiomorph": "0.3.0", "jquery": "3.7.1", "katex": "0.16.44", - "mermaid": "11.13.0", + "mermaid": "11.14.0", "mini-css-extract-plugin": "2.10.0", "minimatch": "10.2.4", "pdfobject": "2.3.0", @@ -11538,14 +11538,14 @@ } }, "node_modules/mermaid": { - "version": "11.13.0", - "resolved": "https://registry.npmjs.org/mermaid/-/mermaid-11.13.0.tgz", - "integrity": "sha512-fEnci+Immw6lKMFI8sqzjlATTyjLkRa6axrEgLV2yHTfv8r+h1wjFbV6xeRtd4rUV1cS4EpR9rwp3Rci7TRWDw==", + "version": "11.14.0", + "resolved": "https://registry.npmjs.org/mermaid/-/mermaid-11.14.0.tgz", + "integrity": "sha512-GSGloRsBs+JINmmhl0JDwjpuezCsHB4WGI4NASHxL3fHo3o/BRXTxhDLKnln8/Q0lRFRyDdEjmk1/d5Sn1Xz8g==", "license": "MIT", "dependencies": { "@braintree/sanitize-url": "^7.1.1", "@iconify/utils": "^3.0.2", - "@mermaid-js/parser": "^1.0.1", + "@mermaid-js/parser": "^1.1.0", "@types/d3": "^7.4.3", "@upsetjs/venn.js": "^2.0.0", "cytoscape": "^3.33.1", diff --git a/package.json b/package.json index cb734aab0c..3c368c5b1a 100644 --- 
a/package.json +++ b/package.json @@ -54,7 +54,7 @@ "idiomorph": "0.3.0", "jquery": "3.7.1", "katex": "0.16.44", - "mermaid": "11.13.0", + "mermaid": "11.14.0", "mini-css-extract-plugin": "2.10.0", "minimatch": "10.2.4", "pdfobject": "2.3.0", From e14e2206513f38589665229d7a786e0de41b78fa Mon Sep 17 00:00:00 2001 From: Mathieu Fenniak Date: Sun, 5 Apr 2026 14:37:09 +0200 Subject: [PATCH 087/352] perf: bulk load resolvers & reactions on pull request comments (#11988) Optimize loading pull request review comments, which currently perform separate database queries for each comment in order to load the resolver of the comment, and the reactions on that comment, and the users on each reaction of the comments. I stumbled across this ugly code, which enticed me to look into this: https://codeberg.org/forgejo/forgejo/src/commit/80d840c1284e4f44b9efac208811b9ed26455ade/routers/web/repo/pull.go#L1107-L1120 It appeared to load the attachments from each comment on the pull request review page in separate database queries. It turned out to be a noop, as the attachments are already loaded in bulk: https://codeberg.org/forgejo/forgejo/src/commit/80d840c1284e4f44b9efac208811b9ed26455ade/models/issues/comment_code.go#L120-L122 but the `findCodeComments` method loads the "resolver doer" and the reactions one-by-one for each comment. So I fixed that instead, and removed the ineffective deeply nested for loop. ## Checklist The [contributor guide](https://forgejo.org/docs/next/contributor/) contains information that will be helpful to first time contributors. All work and communication must conform to Forgejo's [AI Agreement](https://codeberg.org/forgejo/governance/src/branch/main/AIAgreement.md). There also are a few [conditions for merging Pull Requests in Forgejo repositories](https://codeberg.org/forgejo/governance/src/branch/main/PullRequestsAgreement.md). You are also welcome to join the [Forgejo development chatroom](https://matrix.to/#/#forgejo-development:matrix.org). 
### Tests for Go changes - I added test coverage for Go changes... - [x] in their respective `*_test.go` for unit tests. - [ ] in the `tests/integration` directory if it involves interactions with a live Forgejo server. - I ran... - [x] `make pr-go` before pushing ### Documentation - [ ] I created a pull request [to the documentation](https://codeberg.org/forgejo/docs) to explain to Forgejo users how to use this change. - [x] I did not document these changes and I do not expect someone else to do it. ### Release notes - [ ] This change will be noticed by a Forgejo user or admin (feature, bug fix, performance, etc.). I suggest to include a release note for this change. - [x] This change is not visible to a Forgejo user or admin (refactor, dependency upgrade, etc.). I think there is no need to add a release note for this change. Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/11988 Reviewed-by: Andreas Ahlenstorf Co-authored-by: Mathieu Fenniak Co-committed-by: Mathieu Fenniak --- models/issues/comment.go | 15 ---- models/issues/comment_code.go | 22 +++--- models/issues/comment_list.go | 79 ++++++++++++++++++++ models/issues/comment_list_test.go | 108 +++++++++++++++++++++++++++ models/issues/comment_test.go | 25 ++++++- models/issues/reaction.go | 31 ++++++++ routers/api/v1/repo/issue_comment.go | 5 ++ routers/web/repo/issue.go | 9 ++- routers/web/repo/pull.go | 15 ---- services/convert/issue_comment.go | 6 -- 10 files changed, 261 insertions(+), 54 deletions(-) diff --git a/models/issues/comment.go b/models/issues/comment.go index fd0f595945..bfad3935fb 100644 --- a/models/issues/comment.go +++ b/models/issues/comment.go @@ -663,21 +663,6 @@ func (c *Comment) LoadAssigneeUserAndTeam(ctx context.Context) error { return nil } -// LoadResolveDoer if comment.Type is CommentTypeCode and ResolveDoerID not zero, then load resolveDoer -func (c *Comment) LoadResolveDoer(ctx context.Context) (err error) { - if c.ResolveDoerID == 0 || c.Type != CommentTypeCode { - 
return nil - } - c.ResolveDoer, err = user_model.GetUserByID(ctx, c.ResolveDoerID) - if err != nil { - if user_model.IsErrUserNotExist(err) { - c.ResolveDoer = user_model.NewGhostUser() - err = nil - } - } - return err -} - // IsResolved check if an code comment is resolved func (c *Comment) IsResolved() bool { return c.ResolveDoerID != 0 && c.Type == CommentTypeCode diff --git a/models/issues/comment_code.go b/models/issues/comment_code.go index 3c87a1b41a..800d1e830e 100644 --- a/models/issues/comment_code.go +++ b/models/issues/comment_code.go @@ -133,7 +133,7 @@ func findCodeComments(ctx context.Context, opts FindCommentsOptions, issue *Issu return nil, err } - n := 0 + readyComments := make(CommentList, 0, len(comments)) for _, comment := range comments { if re, ok := reviews[comment.ReviewID]; ok && re != nil { // If the review is pending only the author can see the comments (except if the review is set) @@ -143,17 +143,18 @@ func findCodeComments(ctx context.Context, opts FindCommentsOptions, issue *Issu } comment.Review = re } - comments[n] = comment - n++ + readyComments = append(readyComments, comment) + } - if err := comment.LoadResolveDoer(ctx); err != nil { - return nil, err - } + if err := readyComments.LoadResolveDoers(ctx); err != nil { + return nil, err + } - if err := comment.LoadReactions(ctx, issue.Repo); err != nil { - return nil, err - } + if err := readyComments.LoadReactions(ctx, issue.Repo); err != nil { + return nil, err + } + for _, comment := range readyComments { var err error if comment.RenderedContent, err = markdown.RenderString(&markup.RenderContext{ Ctx: ctx, @@ -165,7 +166,8 @@ func findCodeComments(ctx context.Context, opts FindCommentsOptions, issue *Issu return nil, err } } - return comments[:n], nil + + return readyComments, nil } // FetchCodeConversation fetches the code conversation of a given comment (same review, treePath and line number) diff --git a/models/issues/comment_list.go b/models/issues/comment_list.go index 
53903bea31..9a5c22244b 100644 --- a/models/issues/comment_list.go +++ b/models/issues/comment_list.go @@ -5,6 +5,7 @@ package issues import ( "context" + "errors" "forgejo.org/models/db" repo_model "forgejo.org/models/repo" @@ -390,6 +391,84 @@ func (comments CommentList) LoadAttachments(ctx context.Context) (err error) { return nil } +func (comments CommentList) LoadResolveDoers(ctx context.Context) (err error) { + relevant := func(c *Comment) bool { + return c.ResolveDoerID != 0 && c.Type == CommentTypeCode + } + userIDs := make(container.Set[int64]) + for _, comment := range comments { + if relevant(comment) { + userIDs.Add(comment.ResolveDoerID) + } + } + + if len(userIDs) == 0 { + return nil + } + + userMap := make(map[int64]*user_model.User) + users, err := user_model.GetUsersByIDs(ctx, userIDs.Slice()) + if err != nil { + return err + } + for _, user := range users { + userMap[user.ID] = user + } + + for _, comment := range comments { + if !relevant(comment) { + continue + } + resolveDoer, ok := userMap[comment.ResolveDoerID] + if !ok { + comment.ResolveDoer = user_model.NewGhostUser() + } else { + comment.ResolveDoer = resolveDoer + } + } + + return nil +} + +func (comments CommentList) LoadReactions(ctx context.Context, repo *repo_model.Repository) (err error) { + loadIssueID := int64(0) + loadCommentIDs := make([]int64, 0, len(comments)) + + for _, comment := range comments { + if loadIssueID == 0 { + loadIssueID = comment.IssueID + } else if loadIssueID != comment.IssueID { + return errors.New("unable to load reactions from comments on different issues than each other") + } + if comment.Reactions == nil { + loadCommentIDs = append(loadCommentIDs, comment.ID) + } + } + + if loadIssueID == 0 { + return nil + } + + reactions, err := getReactionsForComments(ctx, loadIssueID, loadCommentIDs) + if err != nil { + return err + } + + allReactions := make(ReactionList, 0, len(reactions)) + for _, comment := range comments { + if comment.Reactions == nil { + 
comment.Reactions = reactions[comment.ID] + allReactions = append(allReactions, comment.Reactions...) + } + } + + if _, err := allReactions.LoadUsers(ctx, repo); err != nil { + return err + } + + return nil +} + func (comments CommentList) getReviewIDs() []int64 { return container.FilterSlice(comments, func(comment *Comment) (int64, bool) { return comment.ReviewID, comment.ReviewID > 0 diff --git a/models/issues/comment_list_test.go b/models/issues/comment_list_test.go index 062a710b84..12a9144722 100644 --- a/models/issues/comment_list_test.go +++ b/models/issues/comment_list_test.go @@ -84,3 +84,111 @@ func TestCommentListLoadUser(t *testing.T) { }) } } + +func TestCommentListLoadResolveDoers(t *testing.T) { + require.NoError(t, unittest.PrepareTestDatabase()) + + issue := unittest.AssertExistsAndLoadBean(t, &Issue{}) + repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: issue.RepoID}) + doer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: repo.OwnerID}) + + empty := CommentList{} + require.NoError(t, empty.LoadResolveDoers(t.Context())) + + comment1, err := CreateComment(db.DefaultContext, &CreateCommentOptions{ + Type: CommentTypeCode, + Doer: doer, + Repo: repo, + Issue: issue, + Content: "Hello", + }) + require.NoError(t, err) + require.NoError(t, MarkConversation(t.Context(), comment1, doer, true)) + comment1 = unittest.AssertExistsAndLoadBean(t, &Comment{ID: comment1.ID}) // reload after change + comment1List := CommentList{comment1} + require.NoError(t, comment1List.LoadResolveDoers(t.Context())) + require.NotNil(t, comment1.ResolveDoer) + assert.Equal(t, doer.ID, comment1.ResolveDoer.ID) + + comment2, err := CreateComment(db.DefaultContext, &CreateCommentOptions{ + Type: CommentTypeCode, + Doer: doer, + Repo: repo, + Issue: issue, + Content: "Hello again", + }) + require.NoError(t, err) + require.NoError(t, MarkConversation(t.Context(), comment2, user_model.NewGhostUser(), true)) + + // Reload for fresh objects + comment1 = 
unittest.AssertExistsAndLoadBean(t, &Comment{ID: comment1.ID}) + comment2 = unittest.AssertExistsAndLoadBean(t, &Comment{ID: comment2.ID}) + + comment2List := CommentList{comment1, comment2} + require.NoError(t, comment2List.LoadResolveDoers(t.Context())) + require.NotNil(t, comment1.ResolveDoer) + assert.Equal(t, doer.ID, comment1.ResolveDoer.ID) + require.NotNil(t, comment2.ResolveDoer) + assert.EqualValues(t, -1, comment2.ResolveDoer.ID) +} + +func TestCommentListLoadReactions(t *testing.T) { + require.NoError(t, unittest.PrepareTestDatabase()) + + issue := unittest.AssertExistsAndLoadBean(t, &Issue{}) + repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: issue.RepoID}) + doer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: repo.OwnerID}) + + empty := CommentList{} + require.NoError(t, empty.LoadReactions(t.Context(), repo)) + + comment1, err := CreateComment(db.DefaultContext, &CreateCommentOptions{ + Type: CommentTypeCode, + Doer: doer, + Repo: repo, + Issue: issue, + Content: "Hello", + }) + require.NoError(t, err) + _, err = CreateReaction(t.Context(), &ReactionOptions{ + Type: "eyes", + DoerID: doer.ID, + IssueID: issue.ID, + CommentID: comment1.ID, + }) + require.NoError(t, err) + + comment1 = unittest.AssertExistsAndLoadBean(t, &Comment{ID: comment1.ID}) // reload after change + comment1List := CommentList{comment1} + require.NoError(t, comment1List.LoadReactions(t.Context(), repo)) + require.Len(t, comment1.Reactions, 1) + assert.Equal(t, "eyes", comment1.Reactions[0].Type) + assert.NotNil(t, comment1.Reactions[0].User) + + comment2, err := CreateComment(db.DefaultContext, &CreateCommentOptions{ + Type: CommentTypeCode, + Doer: doer, + Repo: repo, + Issue: issue, + Content: "Hello again", + }) + require.NoError(t, err) + _, err = CreateReaction(t.Context(), &ReactionOptions{ + Type: "rocket", + DoerID: doer.ID, + IssueID: issue.ID, + CommentID: comment2.ID, + }) + require.NoError(t, err) + + // Reload for fresh objects + 
comment1 = unittest.AssertExistsAndLoadBean(t, &Comment{ID: comment1.ID}) + comment2 = unittest.AssertExistsAndLoadBean(t, &Comment{ID: comment2.ID}) + + comment2List := CommentList{comment1, comment2} + require.NoError(t, comment2List.LoadReactions(t.Context(), repo)) + require.Len(t, comment1.Reactions, 1) + require.Len(t, comment2.Reactions, 1) + assert.Equal(t, "rocket", comment2.Reactions[0].Type) + assert.NotNil(t, comment2.Reactions[0].User) +} diff --git a/models/issues/comment_test.go b/models/issues/comment_test.go index c7adf6f62e..da87b8ec2f 100644 --- a/models/issues/comment_test.go +++ b/models/issues/comment_test.go @@ -52,12 +52,29 @@ func TestFetchCodeConversations(t *testing.T) { issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 2}) user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1}) + _, err := issues_model.CreateReaction(t.Context(), &issues_model.ReactionOptions{ + Type: "eyes", + DoerID: 2, + IssueID: issue.ID, + CommentID: 4, + }) + require.NoError(t, err) + require.NoError(t, issues_model.MarkConversation(t.Context(), + unittest.AssertExistsAndLoadBean(t, &issues_model.Comment{ID: 4}), + user, true)) + res, err := issues_model.FetchCodeConversations(db.DefaultContext, issue, user, false) require.NoError(t, err) - assert.Contains(t, res, "README.md") - assert.Contains(t, res["README.md"], int64(4)) - assert.Len(t, res["README.md"][4], 1) - assert.Equal(t, int64(4), res["README.md"][4][0][0].ID) + require.Contains(t, res, "README.md") + require.Contains(t, res["README.md"], int64(4)) + require.Len(t, res["README.md"][4], 1) + require.Len(t, res["README.md"][4][0], 1) + comment := res["README.md"][4][0][0] + assert.Equal(t, int64(4), comment.ID) + assert.NotNil(t, comment.ResolveDoer) + require.Len(t, comment.Reactions, 1) + r := comment.Reactions[0] + assert.NotNil(t, r.User) user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2}) res, err = 
issues_model.FetchCodeConversations(db.DefaultContext, issue, user2, false) diff --git a/models/issues/reaction.go b/models/issues/reaction.go index 522040c022..9a277a8c12 100644 --- a/models/issues/reaction.go +++ b/models/issues/reaction.go @@ -176,6 +176,37 @@ func FindReactions(ctx context.Context, opts FindReactionsOptions) (ReactionList return reactions, count, err } +func getReactionsForComments(ctx context.Context, issueID int64, commentIDs []int64) (map[int64]ReactionList, error) { + reactions := make(map[int64]ReactionList, len(commentIDs)) + left := len(commentIDs) + for left > 0 { + limit := min(left, db.DefaultMaxInSize) + rows, err := db.GetEngine(ctx). + Where(builder.Eq{"issue_id": issueID}). + In("reaction.`type`", setting.UI.Reactions). + In("comment_id", commentIDs[:limit]). + Rows(&Reaction{}) + if err != nil { + return nil, err + } + + for rows.Next() { + var reaction Reaction + err = rows.Scan(&reaction) + if err != nil { + _ = rows.Close() + return nil, err + } + reactions[reaction.CommentID] = append(reactions[reaction.CommentID], &reaction) + } + + _ = rows.Close() + left -= limit + commentIDs = commentIDs[limit:] + } + return reactions, nil +} + func createReaction(ctx context.Context, opts *ReactionOptions) (*Reaction, error) { reaction := &Reaction{ Type: opts.Type, diff --git a/routers/api/v1/repo/issue_comment.go b/routers/api/v1/repo/issue_comment.go index ceebb41f9e..3f58e4e271 100644 --- a/routers/api/v1/repo/issue_comment.go +++ b/routers/api/v1/repo/issue_comment.go @@ -215,6 +215,11 @@ func ListIssueCommentsAndTimeline(ctx *context.APIContext) { return } + if err := comments.LoadResolveDoers(ctx); err != nil { + ctx.Error(http.StatusInternalServerError, "LoadResolveDoers", err) + return + } + var apiComments []*api.TimelineComment for _, comment := range comments { if comment.Type != issues_model.CommentTypeCode && isXRefCommentAccessible(ctx, ctx.Doer, comment, issue.RepoID, ctx.Reducer) { diff --git a/routers/web/repo/issue.go 
b/routers/web/repo/issue.go index 3852c4c18f..bb4185f785 100644 --- a/routers/web/repo/issue.go +++ b/routers/web/repo/issue.go @@ -1663,6 +1663,11 @@ func ViewIssue(ctx *context.Context) { return } + if err := issue.Comments.LoadResolveDoers(ctx); err != nil { + ctx.ServerError("LoadResolveDoers", err) + return + } + for commentIdx, comment = range issue.Comments { comment.Issue = issue metas := ctx.Repo.Repository.ComposeMetas(ctx) @@ -1801,10 +1806,6 @@ func ViewIssue(ctx *context.Context) { } } } - if err = comment.LoadResolveDoer(ctx); err != nil { - ctx.ServerError("LoadResolveDoer", err) - return - } } else if comment.Type == issues_model.CommentTypePullRequestPush { participants = addParticipant(comment.Poster, participants) if err = comment.LoadPushCommits(ctx); err != nil { diff --git a/routers/web/repo/pull.go b/routers/web/repo/pull.go index c4b8583e2d..e9cdfc5ac7 100644 --- a/routers/web/repo/pull.go +++ b/routers/web/repo/pull.go @@ -1104,21 +1104,6 @@ func viewPullFiles(ctx *context.Context, specifiedStartCommit, specifiedEndCommi return } - for _, file := range diff.Files { - for _, section := range file.Sections { - for _, line := range section.Lines { - for _, comments := range line.Conversations { - for _, comment := range comments { - if err := comment.LoadAttachments(ctx); err != nil { - ctx.ServerError("LoadAttachments", err) - return - } - } - } - } - } - } - pb, err := git_model.GetFirstMatchProtectedBranchRule(ctx, pull.BaseRepoID, pull.BaseBranch) if err != nil { ctx.ServerError("LoadProtectedBranch", err) diff --git a/services/convert/issue_comment.go b/services/convert/issue_comment.go index 9ea315aee6..2f9af9be7c 100644 --- a/services/convert/issue_comment.go +++ b/services/convert/issue_comment.go @@ -43,12 +43,6 @@ func ToTimelineComment(ctx context.Context, repo *repo_model.Repository, c *issu return nil } - err = c.LoadResolveDoer(ctx) - if err != nil { - log.Error("LoadResolveDoer: %v", err) - return nil - } - err = 
c.LoadDepIssueDetails(ctx) if err != nil { log.Error("LoadDepIssueDetails: %v", err) From 15b4c5efe81ba47b6415a9c188981813aec7f775 Mon Sep 17 00:00:00 2001 From: Mathieu Fenniak Date: Sun, 5 Apr 2026 16:36:57 +0200 Subject: [PATCH 088/352] chore(deps): bump xorm to v1.3.9-forgejo.10 (#11992) Brings [deadlock error type](https://code.forgejo.org/xorm/xorm/pulls/95), which should allow fixing #11968. Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/11992 Reviewed-by: Andreas Ahlenstorf Co-authored-by: Mathieu Fenniak Co-committed-by: Mathieu Fenniak --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 37039873c8..9ec653792b 100644 --- a/go.mod +++ b/go.mod @@ -273,4 +273,4 @@ replace github.com/gliderlabs/ssh => code.forgejo.org/forgejo/ssh v0.0.0-2024121 replace git.sr.ht/~mariusor/go-xsd-duration => code.forgejo.org/forgejo/go-xsd-duration v0.0.0-20220703122237-02e73435a078 -replace xorm.io/xorm v1.3.9 => code.forgejo.org/xorm/xorm v1.3.9-forgejo.9 +replace xorm.io/xorm v1.3.9 => code.forgejo.org/xorm/xorm v1.3.9-forgejo.10 diff --git a/go.sum b/go.sum index cd477466b7..1f79a74863 100644 --- a/go.sum +++ b/go.sum @@ -42,8 +42,8 @@ code.forgejo.org/go-chi/captcha v1.0.2 h1:vyHDPXkpjDv8bLO9NqtWzZayzstD/WpJ5xwEkA code.forgejo.org/go-chi/captcha v1.0.2/go.mod h1:lxiPLcJ76UCZHoH31/Wbum4GUi2NgjfFZLrJkKv1lLE= code.forgejo.org/go-chi/session v1.0.4 h1:WQ1NaVxcCpxYwCliEGypKclZnOCjh3p1fk8XciJc62U= code.forgejo.org/go-chi/session v1.0.4/go.mod h1:+sSTiomM5C8AUPtxZyTENIbcTz22kcVottKO0lnmDRk= -code.forgejo.org/xorm/xorm v1.3.9-forgejo.9 h1:hzEXDa53opdp5nrGG4F6y8HzFzrGXd5GIvFyUHcvGmI= -code.forgejo.org/xorm/xorm v1.3.9-forgejo.9/go.mod h1:A7sFd3BFmRp20h6drSsCXgQRQdF8Vz8HuCSrzFS3m90= +code.forgejo.org/xorm/xorm v1.3.9-forgejo.10 h1:DCProZz7GP10ue7NoVr1vreuADhH9tEImYFye2+aDG8= +code.forgejo.org/xorm/xorm v1.3.9-forgejo.10/go.mod h1:ly5tUt9l3b+y7HdXDM1UucQXuS58ahNxB9tPM5/6LfM= code.gitea.io/sdk/gitea v0.21.0 
h1:69n6oz6kEVHRo1+APQQyizkhrZrLsTLXey9142pfkD4= code.gitea.io/sdk/gitea v0.21.0/go.mod h1:tnBjVhuKJCn8ibdyyhvUyxrR1Ca2KHEoTWoukNhXQPA= code.superseriousbusiness.org/exif-terminator v0.11.1 h1:qnujLH4/Yk/CFtFMmtjozbdV6Ry5G3Q/E/mLlWm/gQI= From 397c8755f2165effbf9696934e30fbcdf5d3ba57 Mon Sep 17 00:00:00 2001 From: forgejo-backport-action Date: Sun, 5 Apr 2026 17:30:39 +0200 Subject: [PATCH 089/352] [v15.0/forgejo] perf: bulk load resolvers & reactions on pull request comments (#11995) **Backport:** https://codeberg.org/forgejo/forgejo/pulls/11988 Optimize loading pull request review comments, which currently perform separate database queries for each comment in order to load the resolver of the comment, and the reactions on that comment, and the users on each reaction of the comments. I stumbled across this ugly code, which enticed me to look into this: https://codeberg.org/forgejo/forgejo/src/commit/80d840c1284e4f44b9efac208811b9ed26455ade/routers/web/repo/pull.go#L1107-L1120 It appeared to load the attachments from each comment on the pull request review page in separate database queries. It turned out to be a noop, as the attachments are already loaded in bulk: https://codeberg.org/forgejo/forgejo/src/commit/80d840c1284e4f44b9efac208811b9ed26455ade/models/issues/comment_code.go#L120-L122 but the `findCodeComments` method loads the "resolver doer" and the reactions one-by-one for each comment. So I fixed that instead, and removed the ineffective deeply nested for loop. ## Checklist The [contributor guide](https://forgejo.org/docs/next/contributor/) contains information that will be helpful to first time contributors. All work and communication must conform to Forgejo's [AI Agreement](https://codeberg.org/forgejo/governance/src/branch/main/AIAgreement.md). There also are a few [conditions for merging Pull Requests in Forgejo repositories](https://codeberg.org/forgejo/governance/src/branch/main/PullRequestsAgreement.md). 
You are also welcome to join the [Forgejo development chatroom](https://matrix.to/#/#forgejo-development:matrix.org). ### Tests for Go changes - I added test coverage for Go changes... - [x] in their respective `*_test.go` for unit tests. - [ ] in the `tests/integration` directory if it involves interactions with a live Forgejo server. - I ran... - [x] `make pr-go` before pushing ### Documentation - [ ] I created a pull request [to the documentation](https://codeberg.org/forgejo/docs) to explain to Forgejo users how to use this change. - [x] I did not document these changes and I do not expect someone else to do it. ### Release notes - [ ] This change will be noticed by a Forgejo user or admin (feature, bug fix, performance, etc.). I suggest to include a release note for this change. - [x] This change is not visible to a Forgejo user or admin (refactor, dependency upgrade, etc.). I think there is no need to add a release note for this change. Co-authored-by: Mathieu Fenniak Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/11995 Reviewed-by: Mathieu Fenniak Co-authored-by: forgejo-backport-action Co-committed-by: forgejo-backport-action --- models/issues/comment.go | 15 ---- models/issues/comment_code.go | 22 +++--- models/issues/comment_list.go | 79 ++++++++++++++++++++ models/issues/comment_list_test.go | 108 +++++++++++++++++++++++++++ models/issues/comment_test.go | 25 ++++++- models/issues/reaction.go | 31 ++++++++ routers/api/v1/repo/issue_comment.go | 5 ++ routers/web/repo/issue.go | 9 ++- routers/web/repo/pull.go | 15 ---- services/convert/issue_comment.go | 6 -- 10 files changed, 261 insertions(+), 54 deletions(-) diff --git a/models/issues/comment.go b/models/issues/comment.go index fd0f595945..bfad3935fb 100644 --- a/models/issues/comment.go +++ b/models/issues/comment.go @@ -663,21 +663,6 @@ func (c *Comment) LoadAssigneeUserAndTeam(ctx context.Context) error { return nil } -// LoadResolveDoer if comment.Type is CommentTypeCode and ResolveDoerID 
not zero, then load resolveDoer -func (c *Comment) LoadResolveDoer(ctx context.Context) (err error) { - if c.ResolveDoerID == 0 || c.Type != CommentTypeCode { - return nil - } - c.ResolveDoer, err = user_model.GetUserByID(ctx, c.ResolveDoerID) - if err != nil { - if user_model.IsErrUserNotExist(err) { - c.ResolveDoer = user_model.NewGhostUser() - err = nil - } - } - return err -} - // IsResolved check if an code comment is resolved func (c *Comment) IsResolved() bool { return c.ResolveDoerID != 0 && c.Type == CommentTypeCode diff --git a/models/issues/comment_code.go b/models/issues/comment_code.go index 3c87a1b41a..800d1e830e 100644 --- a/models/issues/comment_code.go +++ b/models/issues/comment_code.go @@ -133,7 +133,7 @@ func findCodeComments(ctx context.Context, opts FindCommentsOptions, issue *Issu return nil, err } - n := 0 + readyComments := make(CommentList, 0, len(comments)) for _, comment := range comments { if re, ok := reviews[comment.ReviewID]; ok && re != nil { // If the review is pending only the author can see the comments (except if the review is set) @@ -143,17 +143,18 @@ func findCodeComments(ctx context.Context, opts FindCommentsOptions, issue *Issu } comment.Review = re } - comments[n] = comment - n++ + readyComments = append(readyComments, comment) + } - if err := comment.LoadResolveDoer(ctx); err != nil { - return nil, err - } + if err := readyComments.LoadResolveDoers(ctx); err != nil { + return nil, err + } - if err := comment.LoadReactions(ctx, issue.Repo); err != nil { - return nil, err - } + if err := readyComments.LoadReactions(ctx, issue.Repo); err != nil { + return nil, err + } + for _, comment := range readyComments { var err error if comment.RenderedContent, err = markdown.RenderString(&markup.RenderContext{ Ctx: ctx, @@ -165,7 +166,8 @@ func findCodeComments(ctx context.Context, opts FindCommentsOptions, issue *Issu return nil, err } } - return comments[:n], nil + + return readyComments, nil } // FetchCodeConversation fetches the 
code conversation of a given comment (same review, treePath and line number) diff --git a/models/issues/comment_list.go b/models/issues/comment_list.go index 53903bea31..9a5c22244b 100644 --- a/models/issues/comment_list.go +++ b/models/issues/comment_list.go @@ -5,6 +5,7 @@ package issues import ( "context" + "errors" "forgejo.org/models/db" repo_model "forgejo.org/models/repo" @@ -390,6 +391,84 @@ func (comments CommentList) LoadAttachments(ctx context.Context) (err error) { return nil } +func (comments CommentList) LoadResolveDoers(ctx context.Context) (err error) { + relevant := func(c *Comment) bool { + return c.ResolveDoerID != 0 && c.Type == CommentTypeCode + } + userIDs := make(container.Set[int64]) + for _, comment := range comments { + if relevant(comment) { + userIDs.Add(comment.ResolveDoerID) + } + } + + if len(userIDs) == 0 { + return nil + } + + userMap := make(map[int64]*user_model.User) + users, err := user_model.GetUsersByIDs(ctx, userIDs.Slice()) + if err != nil { + return err + } + for _, user := range users { + userMap[user.ID] = user + } + + for _, comment := range comments { + if !relevant(comment) { + continue + } + resolveDoer, ok := userMap[comment.ResolveDoerID] + if !ok { + comment.ResolveDoer = user_model.NewGhostUser() + } else { + comment.ResolveDoer = resolveDoer + } + } + + return nil +} + +func (comments CommentList) LoadReactions(ctx context.Context, repo *repo_model.Repository) (err error) { + loadIssueID := int64(0) + loadCommentIDs := make([]int64, 0, len(comments)) + + for _, comment := range comments { + if loadIssueID == 0 { + loadIssueID = comment.IssueID + } else if loadIssueID != comment.IssueID { + return errors.New("unable to load reactions from comments on different issues than each other") + } + if comment.Reactions == nil { + loadCommentIDs = append(loadCommentIDs, comment.ID) + } + } + + if loadIssueID == 0 { + return nil + } + + reactions, err := getReactionsForComments(ctx, loadIssueID, loadCommentIDs) + if err != 
nil { + return err + } + + allReactions := make(ReactionList, 0, len(reactions)) + for _, comment := range comments { + if comment.Reactions == nil { + comment.Reactions = reactions[comment.ID] + allReactions = append(allReactions, comment.Reactions...) + } + } + + if _, err := allReactions.LoadUsers(ctx, repo); err != nil { + return err + } + + return nil +} + func (comments CommentList) getReviewIDs() []int64 { return container.FilterSlice(comments, func(comment *Comment) (int64, bool) { return comment.ReviewID, comment.ReviewID > 0 diff --git a/models/issues/comment_list_test.go b/models/issues/comment_list_test.go index 062a710b84..12a9144722 100644 --- a/models/issues/comment_list_test.go +++ b/models/issues/comment_list_test.go @@ -84,3 +84,111 @@ func TestCommentListLoadUser(t *testing.T) { }) } } + +func TestCommentListLoadResolveDoers(t *testing.T) { + require.NoError(t, unittest.PrepareTestDatabase()) + + issue := unittest.AssertExistsAndLoadBean(t, &Issue{}) + repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: issue.RepoID}) + doer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: repo.OwnerID}) + + empty := CommentList{} + require.NoError(t, empty.LoadResolveDoers(t.Context())) + + comment1, err := CreateComment(db.DefaultContext, &CreateCommentOptions{ + Type: CommentTypeCode, + Doer: doer, + Repo: repo, + Issue: issue, + Content: "Hello", + }) + require.NoError(t, err) + require.NoError(t, MarkConversation(t.Context(), comment1, doer, true)) + comment1 = unittest.AssertExistsAndLoadBean(t, &Comment{ID: comment1.ID}) // reload after change + comment1List := CommentList{comment1} + require.NoError(t, comment1List.LoadResolveDoers(t.Context())) + require.NotNil(t, comment1.ResolveDoer) + assert.Equal(t, doer.ID, comment1.ResolveDoer.ID) + + comment2, err := CreateComment(db.DefaultContext, &CreateCommentOptions{ + Type: CommentTypeCode, + Doer: doer, + Repo: repo, + Issue: issue, + Content: "Hello again", + }) + 
require.NoError(t, err) + require.NoError(t, MarkConversation(t.Context(), comment2, user_model.NewGhostUser(), true)) + + // Reload for fresh objects + comment1 = unittest.AssertExistsAndLoadBean(t, &Comment{ID: comment1.ID}) + comment2 = unittest.AssertExistsAndLoadBean(t, &Comment{ID: comment2.ID}) + + comment2List := CommentList{comment1, comment2} + require.NoError(t, comment2List.LoadResolveDoers(t.Context())) + require.NotNil(t, comment1.ResolveDoer) + assert.Equal(t, doer.ID, comment1.ResolveDoer.ID) + require.NotNil(t, comment2.ResolveDoer) + assert.EqualValues(t, -1, comment2.ResolveDoer.ID) +} + +func TestCommentListLoadReactions(t *testing.T) { + require.NoError(t, unittest.PrepareTestDatabase()) + + issue := unittest.AssertExistsAndLoadBean(t, &Issue{}) + repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: issue.RepoID}) + doer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: repo.OwnerID}) + + empty := CommentList{} + require.NoError(t, empty.LoadReactions(t.Context(), repo)) + + comment1, err := CreateComment(db.DefaultContext, &CreateCommentOptions{ + Type: CommentTypeCode, + Doer: doer, + Repo: repo, + Issue: issue, + Content: "Hello", + }) + require.NoError(t, err) + _, err = CreateReaction(t.Context(), &ReactionOptions{ + Type: "eyes", + DoerID: doer.ID, + IssueID: issue.ID, + CommentID: comment1.ID, + }) + require.NoError(t, err) + + comment1 = unittest.AssertExistsAndLoadBean(t, &Comment{ID: comment1.ID}) // reload after change + comment1List := CommentList{comment1} + require.NoError(t, comment1List.LoadReactions(t.Context(), repo)) + require.Len(t, comment1.Reactions, 1) + assert.Equal(t, "eyes", comment1.Reactions[0].Type) + assert.NotNil(t, comment1.Reactions[0].User) + + comment2, err := CreateComment(db.DefaultContext, &CreateCommentOptions{ + Type: CommentTypeCode, + Doer: doer, + Repo: repo, + Issue: issue, + Content: "Hello again", + }) + require.NoError(t, err) + _, err = CreateReaction(t.Context(), 
&ReactionOptions{ + Type: "rocket", + DoerID: doer.ID, + IssueID: issue.ID, + CommentID: comment2.ID, + }) + require.NoError(t, err) + + // Reload for fresh objects + comment1 = unittest.AssertExistsAndLoadBean(t, &Comment{ID: comment1.ID}) + comment2 = unittest.AssertExistsAndLoadBean(t, &Comment{ID: comment2.ID}) + + comment2List := CommentList{comment1, comment2} + require.NoError(t, comment2List.LoadReactions(t.Context(), repo)) + require.Len(t, comment1.Reactions, 1) + require.Len(t, comment2.Reactions, 1) + assert.Equal(t, "rocket", comment2.Reactions[0].Type) + assert.NotNil(t, comment2.Reactions[0].User) +} diff --git a/models/issues/comment_test.go b/models/issues/comment_test.go index c7adf6f62e..da87b8ec2f 100644 --- a/models/issues/comment_test.go +++ b/models/issues/comment_test.go @@ -52,12 +52,29 @@ func TestFetchCodeConversations(t *testing.T) { issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 2}) user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1}) + _, err := issues_model.CreateReaction(t.Context(), &issues_model.ReactionOptions{ + Type: "eyes", + DoerID: 2, + IssueID: issue.ID, + CommentID: 4, + }) + require.NoError(t, err) + require.NoError(t, issues_model.MarkConversation(t.Context(), + unittest.AssertExistsAndLoadBean(t, &issues_model.Comment{ID: 4}), + user, true)) + res, err := issues_model.FetchCodeConversations(db.DefaultContext, issue, user, false) require.NoError(t, err) - assert.Contains(t, res, "README.md") - assert.Contains(t, res["README.md"], int64(4)) - assert.Len(t, res["README.md"][4], 1) - assert.Equal(t, int64(4), res["README.md"][4][0][0].ID) + require.Contains(t, res, "README.md") + require.Contains(t, res["README.md"], int64(4)) + require.Len(t, res["README.md"][4], 1) + require.Len(t, res["README.md"][4][0], 1) + comment := res["README.md"][4][0][0] + assert.Equal(t, int64(4), comment.ID) + assert.NotNil(t, comment.ResolveDoer) + require.Len(t, comment.Reactions, 1) + r := 
comment.Reactions[0] + assert.NotNil(t, r.User) user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2}) res, err = issues_model.FetchCodeConversations(db.DefaultContext, issue, user2, false) diff --git a/models/issues/reaction.go b/models/issues/reaction.go index 522040c022..9a277a8c12 100644 --- a/models/issues/reaction.go +++ b/models/issues/reaction.go @@ -176,6 +176,37 @@ func FindReactions(ctx context.Context, opts FindReactionsOptions) (ReactionList return reactions, count, err } +func getReactionsForComments(ctx context.Context, issueID int64, commentIDs []int64) (map[int64]ReactionList, error) { + reactions := make(map[int64]ReactionList, len(commentIDs)) + left := len(commentIDs) + for left > 0 { + limit := min(left, db.DefaultMaxInSize) + rows, err := db.GetEngine(ctx). + Where(builder.Eq{"issue_id": issueID}). + In("reaction.`type`", setting.UI.Reactions). + In("comment_id", commentIDs[:limit]). + Rows(&Reaction{}) + if err != nil { + return nil, err + } + + for rows.Next() { + var reaction Reaction + err = rows.Scan(&reaction) + if err != nil { + _ = rows.Close() + return nil, err + } + reactions[reaction.CommentID] = append(reactions[reaction.CommentID], &reaction) + } + + _ = rows.Close() + left -= limit + commentIDs = commentIDs[limit:] + } + return reactions, nil +} + func createReaction(ctx context.Context, opts *ReactionOptions) (*Reaction, error) { reaction := &Reaction{ Type: opts.Type, diff --git a/routers/api/v1/repo/issue_comment.go b/routers/api/v1/repo/issue_comment.go index ceebb41f9e..3f58e4e271 100644 --- a/routers/api/v1/repo/issue_comment.go +++ b/routers/api/v1/repo/issue_comment.go @@ -215,6 +215,11 @@ func ListIssueCommentsAndTimeline(ctx *context.APIContext) { return } + if err := comments.LoadResolveDoers(ctx); err != nil { + ctx.Error(http.StatusInternalServerError, "LoadResolveDoers", err) + return + } + var apiComments []*api.TimelineComment for _, comment := range comments { if comment.Type != 
issues_model.CommentTypeCode && isXRefCommentAccessible(ctx, ctx.Doer, comment, issue.RepoID, ctx.Reducer) { diff --git a/routers/web/repo/issue.go b/routers/web/repo/issue.go index 3852c4c18f..bb4185f785 100644 --- a/routers/web/repo/issue.go +++ b/routers/web/repo/issue.go @@ -1663,6 +1663,11 @@ func ViewIssue(ctx *context.Context) { return } + if err := issue.Comments.LoadResolveDoers(ctx); err != nil { + ctx.ServerError("LoadResolveDoers", err) + return + } + for commentIdx, comment = range issue.Comments { comment.Issue = issue metas := ctx.Repo.Repository.ComposeMetas(ctx) @@ -1801,10 +1806,6 @@ func ViewIssue(ctx *context.Context) { } } } - if err = comment.LoadResolveDoer(ctx); err != nil { - ctx.ServerError("LoadResolveDoer", err) - return - } } else if comment.Type == issues_model.CommentTypePullRequestPush { participants = addParticipant(comment.Poster, participants) if err = comment.LoadPushCommits(ctx); err != nil { diff --git a/routers/web/repo/pull.go b/routers/web/repo/pull.go index c4b8583e2d..e9cdfc5ac7 100644 --- a/routers/web/repo/pull.go +++ b/routers/web/repo/pull.go @@ -1104,21 +1104,6 @@ func viewPullFiles(ctx *context.Context, specifiedStartCommit, specifiedEndCommi return } - for _, file := range diff.Files { - for _, section := range file.Sections { - for _, line := range section.Lines { - for _, comments := range line.Conversations { - for _, comment := range comments { - if err := comment.LoadAttachments(ctx); err != nil { - ctx.ServerError("LoadAttachments", err) - return - } - } - } - } - } - } - pb, err := git_model.GetFirstMatchProtectedBranchRule(ctx, pull.BaseRepoID, pull.BaseBranch) if err != nil { ctx.ServerError("LoadProtectedBranch", err) diff --git a/services/convert/issue_comment.go b/services/convert/issue_comment.go index 9ea315aee6..2f9af9be7c 100644 --- a/services/convert/issue_comment.go +++ b/services/convert/issue_comment.go @@ -43,12 +43,6 @@ func ToTimelineComment(ctx context.Context, repo *repo_model.Repository, c 
*issu return nil } - err = c.LoadResolveDoer(ctx) - if err != nil { - log.Error("LoadResolveDoer: %v", err) - return nil - } - err = c.LoadDepIssueDetails(ctx) if err != nil { log.Error("LoadDepIssueDetails: %v", err) From 6a879e79dfaf32bc2876e39815cc7d53a5e5eb6a Mon Sep 17 00:00:00 2001 From: Mathieu Fenniak Date: Sun, 5 Apr 2026 18:38:33 +0200 Subject: [PATCH 090/352] test: fix intermittent test failure in TestPackageDebianConcurrent (#11997) Fixes #11968. Adds deadlocks to the package `RetryTx` operations, and bumps the attempt count to 3. Technically this affects production code, not just test code, but the resulting failure is only likely to occur in highly concurrent operations when uploading packages to the debian registry for the first time for a user, which is more of a test artifact than a production likelihood. Manually tested by modifying the `Makefile` to add the `-test.count=25` option to the test command. This failed consistently on my dev system before this change, failed consistently after the deadlock err was added, and then succeeded consistently (multiple runs) after both changes were combined, giving me confidence that the intermittent failure is squashed. ## Checklist The [contributor guide](https://forgejo.org/docs/next/contributor/) contains information that will be helpful to first time contributors. All work and communication must conform to Forgejo's [AI Agreement](https://codeberg.org/forgejo/governance/src/branch/main/AIAgreement.md). There also are a few [conditions for merging Pull Requests in Forgejo repositories](https://codeberg.org/forgejo/governance/src/branch/main/PullRequestsAgreement.md). You are also welcome to join the [Forgejo development chatroom](https://matrix.to/#/#forgejo-development:matrix.org). ### Tests for Go changes - I added test coverage for Go changes... - [ ] in their respective `*_test.go` for unit tests. - [x] in the `tests/integration` directory if it involves interactions with a live Forgejo server. 
- Fixing a test failure, so no new tests added, but they already failed. - I ran... - [x] `make pr-go` before pushing ### Documentation - [ ] I created a pull request [to the documentation](https://codeberg.org/forgejo/docs) to explain to Forgejo users how to use this change. - [x] I did not document these changes and I do not expect someone else to do it. ### Release notes - [ ] This change will be noticed by a Forgejo user or admin (feature, bug fix, performance, etc.). I suggest to include a release note for this change. - [x] This change is not visible to a Forgejo user or admin (refactor, dependency upgrade, etc.). I think there is no need to add a release note for this change. Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/11997 Reviewed-by: Andreas Ahlenstorf Co-authored-by: Mathieu Fenniak Co-committed-by: Mathieu Fenniak --- services/packages/debian/repository.go | 7 ++++--- services/packages/packages.go | 21 ++++++++++++--------- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/services/packages/debian/repository.go b/services/packages/debian/repository.go index ab8c4fdc45..e84ae45ebe 100644 --- a/services/packages/debian/repository.go +++ b/services/packages/debian/repository.go @@ -318,9 +318,10 @@ func buildReleaseFiles(ctx context.Context, ownerID int64, repoVersion *packages // way. var priv string err = db.RetryTx(ctx, db.RetryConfig{ - // A single retry is sufficient the user/org's key pair would have been created by the first successful tx. - AttemptCount: 2, - ErrorIs: []error{xorm.ErrUniqueConstraintViolation}, + // A single retry is sufficient the user/org's key pair would have been created by the first successful tx; an + // additional retry may be necessary if a deadlock occurs with concurrent updates. 
+ AttemptCount: 3, + ErrorIs: []error{xorm.ErrUniqueConstraintViolation, xorm.ErrDeadlock}, }, func(ctx context.Context) error { priv, _, err = GetOrCreateKeyPair(ctx, ownerID) return err diff --git a/services/packages/packages.go b/services/packages/packages.go index a1772cc1d9..c75b4559c8 100644 --- a/services/packages/packages.go +++ b/services/packages/packages.go @@ -95,9 +95,10 @@ func createPackageAndAddFile(ctx context.Context, pvci *PackageCreationInfo, pfc // causes such a recovery from error to panic. So, we retry the entire modification transaction if // ErrUniqueConstraintViolation is encountered. err := db.RetryTx(ctx, db.RetryConfig{ - // A single retry is sufficient as any package index that was concurrently modified should now be present: - AttemptCount: 2, - ErrorIs: []error{xorm.ErrUniqueConstraintViolation}, + // A single retry is sufficient as any package index that was concurrently modified should now be present; an + // additional retry may be necessary if a deadlock occurs with concurrent updates. + AttemptCount: 3, + ErrorIs: []error{xorm.ErrUniqueConstraintViolation, xorm.ErrDeadlock}, }, func(ctx context.Context) error { var err error var pb *packages_model.PackageBlob @@ -237,9 +238,10 @@ func addFileToPackageWrapper(ctx context.Context, fn func(ctx context.Context) ( // See comment in createPackageAndAddFile which explains why RetryTx is used with ErrUniqueConstraintViolation. err := db.RetryTx(ctx, db.RetryConfig{ - // A single retry is sufficient as any package index that was concurrently modified should now be present: - AttemptCount: 2, - ErrorIs: []error{xorm.ErrUniqueConstraintViolation}, + // A single retry is sufficient as any package index that was concurrently modified should now be present; an + // additional retry may be necessary if a deadlock occurs with concurrent updates. 
+ AttemptCount: 3, + ErrorIs: []error{xorm.ErrUniqueConstraintViolation, xorm.ErrDeadlock}, }, func(ctx context.Context) error { var err error var blobCreated bool @@ -468,9 +470,10 @@ func GetOrCreateInternalPackageVersion(ctx context.Context, ownerID int64, packa // See comment in createPackageAndAddFile which explains why RetryTx is used with ErrUniqueConstraintViolation. return pv, db.RetryTx(ctx, db.RetryConfig{ - // A single retry is sufficient as any package index that was concurrently modified should now be present: - AttemptCount: 2, - ErrorIs: []error{xorm.ErrUniqueConstraintViolation}, + // A single retry is sufficient as any package index that was concurrently modified should now be present; an + // additional retry may be necessary if a deadlock occurs with concurrent updates. + AttemptCount: 3, + ErrorIs: []error{xorm.ErrUniqueConstraintViolation, xorm.ErrDeadlock}, }, func(ctx context.Context) error { p := &packages_model.Package{ OwnerID: ownerID, From 9abc1b0144feaf053176ff80bd94d694a3cfce13 Mon Sep 17 00:00:00 2001 From: Mathieu Fenniak Date: Sun, 5 Apr 2026 22:03:45 +0200 Subject: [PATCH 091/352] refactor: reduce code duplication when accessing `DefaultMaxInSize` (#11999) `DefaultMaxInSize` is an internal parameter for limiting the size of `field IN (...)` clauses in DB queries, which is a reasonable thing to do -- in addition to the errors noted when [originally introduced](https://github.com/go-gitea/gitea/pull/4594), there are technical limits that apply to each of PostgreSQL, MySQL, and SQLite which would prevent an unbounded size for a query like this. However: the size is incredibly small at 50, and, the implementation of `DefaultMaxInSize` is really wasteful with copy-and-paste coding. 
This PR: - introduces `GetByIDs` which fetches a `map[int64]*Model` from the database for an array of ID values, while respecting `IN` clause size limits - introduces `GetByFieldIn` which fetches a `map[int64][]*Model` from the database for an array of field values, while respecting `IN` clause size limits - uses `slices.Chunk` for other locations where queries are too complex for these implementations - bumps the `DefaultMaxInSize` parameter from 50 to 500, a conservative increase well under known limits, but 10x the current value: - PostgreSQL supports up to 1GB query text size with 65,535 parameters, but I've experienced performance degradation at high value counts - MySQL supports 64MB query text size without known limits of parameter count - SQLite supports 32,766 parameters in a query ## Checklist The [contributor guide](https://forgejo.org/docs/next/contributor/) contains information that will be helpful to first time contributors. All work and communication must conform to Forgejo's [AI Agreement](https://codeberg.org/forgejo/governance/src/branch/main/AIAgreement.md). There also are a few [conditions for merging Pull Requests in Forgejo repositories](https://codeberg.org/forgejo/governance/src/branch/main/PullRequestsAgreement.md). You are also welcome to join the [Forgejo development chatroom](https://matrix.to/#/#forgejo-development:matrix.org). ### Tests for Go changes - I added test coverage for Go changes... - [x] in their respective `*_test.go` for unit tests. - Refactored functions are assumed to be covered by existing tests to some extent; that assumption is probably wrong but the changes here are relatively easily reviewed for correctness as well. - [ ] in the `tests/integration` directory if it involves interactions with a live Forgejo server. - I ran... - [x] `make pr-go` before pushing ### Documentation - [ ] I created a pull request [to the documentation](https://codeberg.org/forgejo/docs) to explain to Forgejo users how to use this change. 
- [x] I did not document these changes and I do not expect someone else to do it. ### Release notes - [ ] This change will be noticed by a Forgejo user or admin (feature, bug fix, performance, etc.). I suggest to include a release note for this change. - [x] This change is not visible to a Forgejo user or admin (refactor, dependency upgrade, etc.). I think there is no need to add a release note for this change. Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/11999 Reviewed-by: Andreas Ahlenstorf Co-authored-by: Mathieu Fenniak Co-committed-by: Mathieu Fenniak --- models/activities/notification_list.go | 112 ++--------------- models/db/context.go | 86 +++++++++++++ models/db/list.go | 2 +- models/issues/comment_list.go | 168 ++++--------------------- models/issues/issue_list.go | 151 ++++------------------ models/issues/reaction.go | 10 +- tests/integration/db_query_test.go | 64 ++++++++++ 7 files changed, 215 insertions(+), 378 deletions(-) create mode 100644 tests/integration/db_query_test.go diff --git a/models/activities/notification_list.go b/models/activities/notification_list.go index bf6356021e..3f3a48eaa5 100644 --- a/models/activities/notification_list.go +++ b/models/activities/notification_list.go @@ -210,31 +210,9 @@ func (nl NotificationList) LoadRepos(ctx context.Context) (repo_model.Repository } repoIDs := nl.getPendingRepoIDs() - repos := make(map[int64]*repo_model.Repository, len(repoIDs)) - left := len(repoIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - rows, err := db.GetEngine(ctx). - In("id", repoIDs[:limit]). 
- Rows(new(repo_model.Repository)) - if err != nil { - return nil, nil, err - } - - for rows.Next() { - var repo repo_model.Repository - err = rows.Scan(&repo) - if err != nil { - rows.Close() - return nil, nil, err - } - - repos[repo.ID] = &repo - } - _ = rows.Close() - - left -= limit - repoIDs = repoIDs[limit:] + repos, err := db.GetByIDs(ctx, "id", repoIDs, &repo_model.Repository{}) + if err != nil { + return nil, nil, err } failed := []int{} @@ -281,31 +259,9 @@ func (nl NotificationList) LoadIssues(ctx context.Context) ([]int, error) { } issueIDs := nl.getPendingIssueIDs() - issues := make(map[int64]*issues_model.Issue, len(issueIDs)) - left := len(issueIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - rows, err := db.GetEngine(ctx). - In("id", issueIDs[:limit]). - Rows(new(issues_model.Issue)) - if err != nil { - return nil, err - } - - for rows.Next() { - var issue issues_model.Issue - err = rows.Scan(&issue) - if err != nil { - rows.Close() - return nil, err - } - - issues[issue.ID] = &issue - } - _ = rows.Close() - - left -= limit - issueIDs = issueIDs[limit:] + issues, err := db.GetByIDs(ctx, "id", issueIDs, &issues_model.Issue{}) + if err != nil { + return nil, err } failures := []int{} @@ -373,31 +329,9 @@ func (nl NotificationList) LoadUsers(ctx context.Context) ([]int, error) { } userIDs := nl.getUserIDs() - users := make(map[int64]*user_model.User, len(userIDs)) - left := len(userIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - rows, err := db.GetEngine(ctx). - In("id", userIDs[:limit]). 
- Rows(new(user_model.User)) - if err != nil { - return nil, err - } - - for rows.Next() { - var user user_model.User - err = rows.Scan(&user) - if err != nil { - rows.Close() - return nil, err - } - - users[user.ID] = &user - } - _ = rows.Close() - - left -= limit - userIDs = userIDs[limit:] + users, err := db.GetByIDs(ctx, "id", userIDs, &user_model.User{}) + if err != nil { + return nil, err } failures := []int{} @@ -421,31 +355,9 @@ func (nl NotificationList) LoadComments(ctx context.Context) ([]int, error) { } commentIDs := nl.getPendingCommentIDs() - comments := make(map[int64]*issues_model.Comment, len(commentIDs)) - left := len(commentIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - rows, err := db.GetEngine(ctx). - In("id", commentIDs[:limit]). - Rows(new(issues_model.Comment)) - if err != nil { - return nil, err - } - - for rows.Next() { - var comment issues_model.Comment - err = rows.Scan(&comment) - if err != nil { - rows.Close() - return nil, err - } - - comments[comment.ID] = &comment - } - _ = rows.Close() - - left -= limit - commentIDs = commentIDs[limit:] + comments, err := db.GetByIDs(ctx, "id", commentIDs, &issues_model.Comment{}) + if err != nil { + return nil, err } failures := []int{} diff --git a/models/db/context.go b/models/db/context.go index f098b40a32..18237bb2a2 100644 --- a/models/db/context.go +++ b/models/db/context.go @@ -8,6 +8,7 @@ import ( "database/sql" "errors" "fmt" + "slices" "xorm.io/builder" "xorm.io/xorm" @@ -270,6 +271,91 @@ func GetByID[T any](ctx context.Context, id int64) (object *T, exist bool, err e return &bean, true, nil } +// Retrieves multiple objects with database queries similar to an xorm `.In(idField, idList)`. idField must be a unique +// field on the database table, as a map[id]obj is returned and the usage of a non-unique field would result in objects +// being overwritten in the map. 
+// +// The length of the IN list is constrained to DefaultMaxInSize for each database query, resulting in multiple database +// queries if the length of the idList exceeds that setting; this constraint prevents exceeding bind parameter +// limitations or query length limitations in the database engine. +func GetByIDs[Bean any, Id comparable](ctx context.Context, idField string, idList []Id, bean *Bean) (map[Id]*Bean, error) { + retval := make(map[Id]*Bean, len(idList)) + if len(idList) == 0 { + return retval, nil + } + + table, err := TableInfo(bean) + if err != nil { + return nil, fmt.Errorf("unable to fetch table info for bean %v: %w", bean, err) + } + + var structFieldName string + for _, c := range table.Columns() { + if c.Name == idField { + structFieldName = c.FieldName + break + } + } + if structFieldName == "" { + return nil, fmt.Errorf("unable to identify struct field for id field %s", idField) + } + + for idChunk := range slices.Chunk(idList, DefaultMaxInSize) { + beans := make([]*Bean, 0, len(idChunk)) + if err := GetEngine(ctx).In(idField, idChunk).Find(&beans); err != nil { + return nil, err + } + for _, bean := range beans { + retval[extractFieldValue(bean, structFieldName).(Id)] = bean + } + } + + return retval, nil +} + +// Retrieves multiple objects with database queries similar to an xorm `.In(field, valueList)`. Similar to GetByIDs, +// except that a map[Id][]*Bean is returned as the field value is not assumed to be a unique value -- if there are +// multiple rows in the table for each value, all of them are returned. +// +// The length of the IN list is constrained to DefaultMaxInSize for each database query, resulting in multiple database +// queries if the length of the idList exceeds that setting; this constraint prevents exceeding bind parameter +// limitations or query length limitations in the database engine. 
+func GetByFieldIn[Bean any, Id comparable](ctx context.Context, field string, valueList []Id, bean *Bean) (map[Id][]*Bean, error) { + retval := make(map[Id][]*Bean, len(valueList)) + if len(valueList) == 0 { + return retval, nil + } + + table, err := TableInfo(bean) + if err != nil { + return nil, fmt.Errorf("unable to fetch table info for bean %v: %w", bean, err) + } + + var structFieldName string + for _, c := range table.Columns() { + if c.Name == field { + structFieldName = c.FieldName + break + } + } + if structFieldName == "" { + return nil, fmt.Errorf("unable to identify struct field for field %s", field) + } + + for idChunk := range slices.Chunk(valueList, DefaultMaxInSize) { + beans := make([]*Bean, 0, len(idChunk)) + if err := GetEngine(ctx).In(field, idChunk).Find(&beans); err != nil { + return nil, err + } + for _, bean := range beans { + fieldValue := extractFieldValue(bean, structFieldName).(Id) + retval[fieldValue] = append(retval[fieldValue], bean) + } + } + + return retval, nil +} + func Exist[T any](ctx context.Context, cond builder.Cond) (bool, error) { if !cond.IsValid() { panic("cond is invalid in db.Exist(ctx, cond). This should not be possible.") diff --git a/models/db/list.go b/models/db/list.go index 057221936c..71e9a0b1d2 100644 --- a/models/db/list.go +++ b/models/db/list.go @@ -14,7 +14,7 @@ import ( const ( // DefaultMaxInSize represents default variables number on IN () in SQL - DefaultMaxInSize = 50 + DefaultMaxInSize = 500 defaultFindSliceSize = 10 ) diff --git a/models/issues/comment_list.go b/models/issues/comment_list.go index 9a5c22244b..b218f11dfa 100644 --- a/models/issues/comment_list.go +++ b/models/issues/comment_list.go @@ -52,29 +52,9 @@ func (comments CommentList) loadLabels(ctx context.Context) error { } labelIDs := comments.getLabelIDs() - commentLabels := make(map[int64]*Label, len(labelIDs)) - left := len(labelIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - rows, err := db.GetEngine(ctx). 
- In("id", labelIDs[:limit]). - Rows(new(Label)) - if err != nil { - return err - } - - for rows.Next() { - var label Label - err = rows.Scan(&label) - if err != nil { - _ = rows.Close() - return err - } - commentLabels[label.ID] = &label - } - _ = rows.Close() - left -= limit - labelIDs = labelIDs[limit:] + commentLabels, err := db.GetByIDs(ctx, "id", labelIDs, &Label{}) + if err != nil { + return err } for _, comment := range comments { @@ -99,18 +79,9 @@ func (comments CommentList) loadMilestones(ctx context.Context) error { return nil } - milestones := make(map[int64]*Milestone, len(milestoneIDs)) - left := len(milestoneIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - err := db.GetEngine(ctx). - In("id", milestoneIDs[:limit]). - Find(&milestones) - if err != nil { - return err - } - left -= limit - milestoneIDs = milestoneIDs[limit:] + milestones, err := db.GetByIDs(ctx, "id", milestoneIDs, &Milestone{}) + if err != nil { + return err } for _, comment := range comments { @@ -135,18 +106,9 @@ func (comments CommentList) loadOldMilestones(ctx context.Context) error { return nil } - milestones := make(map[int64]*Milestone, len(milestoneIDs)) - left := len(milestoneIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - err := db.GetEngine(ctx). - In("id", milestoneIDs[:limit]). - Find(&milestones) - if err != nil { - return err - } - left -= limit - milestoneIDs = milestoneIDs[limit:] + milestones, err := db.GetByIDs(ctx, "id", milestoneIDs, &Milestone{}) + if err != nil { + return err } for _, comment := range comments { @@ -167,31 +129,9 @@ func (comments CommentList) loadAssignees(ctx context.Context) error { } assigneeIDs := comments.getAssigneeIDs() - assignees := make(map[int64]*user_model.User, len(assigneeIDs)) - left := len(assigneeIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - rows, err := db.GetEngine(ctx). - In("id", assigneeIDs[:limit]). 
- Rows(new(user_model.User)) - if err != nil { - return err - } - - for rows.Next() { - var user user_model.User - err = rows.Scan(&user) - if err != nil { - rows.Close() - return err - } - - assignees[user.ID] = &user - } - _ = rows.Close() - - left -= limit - assigneeIDs = assigneeIDs[limit:] + assignees, err := db.GetByIDs(ctx, "id", assigneeIDs, &user_model.User{}) + if err != nil { + return err } for _, comment := range comments { @@ -232,31 +172,9 @@ func (comments CommentList) LoadIssues(ctx context.Context) error { } issueIDs := comments.getIssueIDs() - issues := make(map[int64]*Issue, len(issueIDs)) - left := len(issueIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - rows, err := db.GetEngine(ctx). - In("id", issueIDs[:limit]). - Rows(new(Issue)) - if err != nil { - return err - } - - for rows.Next() { - var issue Issue - err = rows.Scan(&issue) - if err != nil { - rows.Close() - return err - } - - issues[issue.ID] = &issue - } - _ = rows.Close() - - left -= limit - issueIDs = issueIDs[limit:] + issues, err := db.GetByIDs(ctx, "id", issueIDs, &Issue{}) + if err != nil { + return err } for _, comment := range comments { @@ -281,33 +199,10 @@ func (comments CommentList) loadDependentIssues(ctx context.Context) error { return nil } - e := db.GetEngine(ctx) issueIDs := comments.getDependentIssueIDs() - issues := make(map[int64]*Issue, len(issueIDs)) - left := len(issueIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - rows, err := e. - In("id", issueIDs[:limit]). 
- Rows(new(Issue)) - if err != nil { - return err - } - - for rows.Next() { - var issue Issue - err = rows.Scan(&issue) - if err != nil { - _ = rows.Close() - return err - } - - issues[issue.ID] = &issue - } - _ = rows.Close() - - left -= limit - issueIDs = issueIDs[limit:] + issues, err := db.GetByIDs(ctx, "id", issueIDs, &Issue{}) + if err != nil { + return err } for _, comment := range comments { @@ -358,31 +253,10 @@ func (comments CommentList) LoadAttachments(ctx context.Context) (err error) { return nil } - attachments := make(map[int64][]*repo_model.Attachment, len(comments)) commentsIDs := comments.getAttachmentCommentIDs() - left := len(commentsIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - rows, err := db.GetEngine(ctx). - In("comment_id", commentsIDs[:limit]). - Rows(new(repo_model.Attachment)) - if err != nil { - return err - } - - for rows.Next() { - var attachment repo_model.Attachment - err = rows.Scan(&attachment) - if err != nil { - _ = rows.Close() - return err - } - attachments[attachment.CommentID] = append(attachments[attachment.CommentID], &attachment) - } - - _ = rows.Close() - left -= limit - commentsIDs = commentsIDs[limit:] + attachments, err := db.GetByFieldIn(ctx, "comment_id", commentsIDs, &repo_model.Attachment{}) + if err != nil { + return err } for _, comment := range comments { diff --git a/models/issues/issue_list.go b/models/issues/issue_list.go index 34cfe35475..e4fd9eef2b 100644 --- a/models/issues/issue_list.go +++ b/models/issues/issue_list.go @@ -6,6 +6,7 @@ package issues import ( "context" "fmt" + "slices" "forgejo.org/models/db" project_model "forgejo.org/models/project" @@ -40,18 +41,9 @@ func (issues IssueList) LoadRepositories(ctx context.Context) (repo_model.Reposi } repoIDs := issues.getRepoIDs() - repoMaps := make(map[int64]*repo_model.Repository, len(repoIDs)) - left := len(repoIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - err := db.GetEngine(ctx). - In("id", repoIDs[:limit]). 
- Find(&repoMaps) - if err != nil { - return nil, fmt.Errorf("find repository: %w", err) - } - left -= limit - repoIDs = repoIDs[limit:] + repoMaps, err := db.GetByIDs(ctx, "id", repoIDs, &repo_model.Repository{}) + if err != nil { + return nil, fmt.Errorf("find repository: %w", err) } for _, issue := range issues { @@ -93,18 +85,9 @@ func (issues IssueList) LoadPosters(ctx context.Context) error { } func getPostersByIDs(ctx context.Context, posterIDs []int64) (map[int64]*user_model.User, error) { - posterMaps := make(map[int64]*user_model.User, len(posterIDs)) - left := len(posterIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - err := db.GetEngine(ctx). - In("id", posterIDs[:limit]). - Find(&posterMaps) - if err != nil { - return nil, err - } - left -= limit - posterIDs = posterIDs[limit:] + posterMaps, err := db.GetByIDs(ctx, "id", posterIDs, &user_model.User{}) + if err != nil { + return nil, err } return posterMaps, nil } @@ -129,18 +112,15 @@ func (issues IssueList) LoadLabels(ctx context.Context) error { issueLabels := make(map[int64][]*Label, len(issues)*3) issueIDs := issues.getIssueIDs() - left := len(issueIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) + for issueIDChunk := range slices.Chunk(issueIDs, db.DefaultMaxInSize) { rows, err := db.GetEngine(ctx).Table("label"). Join("LEFT", "issue_label", "issue_label.label_id = label.id"). - In("issue_label.issue_id", issueIDs[:limit]). + In("issue_label.issue_id", issueIDChunk). Asc("label.name"). 
Rows(new(LabelIssue)) if err != nil { return err } - for rows.Next() { var labelIssue LabelIssue err = rows.Scan(&labelIssue) @@ -157,8 +137,6 @@ func (issues IssueList) LoadLabels(ctx context.Context) error { if err1 := rows.Close(); err1 != nil { return fmt.Errorf("IssueList.LoadLabels: Close: %w", err1) } - left -= limit - issueIDs = issueIDs[limit:] } for _, issue := range issues { @@ -180,18 +158,9 @@ func (issues IssueList) LoadMilestones(ctx context.Context) error { return nil } - milestoneMaps := make(map[int64]*Milestone, len(milestoneIDs)) - left := len(milestoneIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - err := db.GetEngine(ctx). - In("id", milestoneIDs[:limit]). - Find(&milestoneMaps) - if err != nil { - return err - } - left -= limit - milestoneIDs = milestoneIDs[limit:] + milestoneMaps, err := db.GetByIDs(ctx, "id", milestoneIDs, &Milestone{}) + if err != nil { + return err } for _, issue := range issues { @@ -204,22 +173,19 @@ func (issues IssueList) LoadMilestones(ctx context.Context) error { func (issues IssueList) LoadProjects(ctx context.Context) error { issueIDs := issues.getIssueIDs() projectMaps := make(map[int64]*project_model.Project, len(issues)) - left := len(issueIDs) type projectWithIssueID struct { *project_model.Project `xorm:"extends"` IssueID int64 } - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - - projects := make([]*projectWithIssueID, 0, limit) + for issueIDChunk := range slices.Chunk(issueIDs, db.DefaultMaxInSize) { + projects := make([]*projectWithIssueID, 0, len(issueIDChunk)) err := db.GetEngine(ctx). Table("project"). Select("project.*, project_issue.issue_id"). Join("INNER", "project_issue", "project.id = project_issue.project_id"). - In("project_issue.issue_id", issueIDs[:limit]). + In("project_issue.issue_id", issueIDChunk). 
Find(&projects) if err != nil { return err @@ -227,8 +193,6 @@ func (issues IssueList) LoadProjects(ctx context.Context) error { for _, project := range projects { projectMaps[project.IssueID] = project.Project } - left -= limit - issueIDs = issueIDs[limit:] } for _, issue := range issues { @@ -249,12 +213,10 @@ func (issues IssueList) LoadAssignees(ctx context.Context) error { assignees := make(map[int64][]*user_model.User, len(issues)) issueIDs := issues.getIssueIDs() - left := len(issueIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) + for issueIDChunk := range slices.Chunk(issueIDs, db.DefaultMaxInSize) { rows, err := db.GetEngine(ctx).Table("issue_assignees"). Join("INNER", "`user`", "`user`.id = `issue_assignees`.assignee_id"). - In("`issue_assignees`.issue_id", issueIDs[:limit]).OrderBy(user_model.GetOrderByName()). + In("`issue_assignees`.issue_id", issueIDChunk).OrderBy(user_model.GetOrderByName()). Rows(new(AssigneeIssue)) if err != nil { return err @@ -275,8 +237,6 @@ func (issues IssueList) LoadAssignees(ctx context.Context) error { if err1 := rows.Close(); err1 != nil { return fmt.Errorf("IssueList.loadAssignees: Close: %w", err1) } - left -= limit - issueIDs = issueIDs[limit:] } for _, issue := range issues { @@ -306,33 +266,9 @@ func (issues IssueList) LoadPullRequests(ctx context.Context) error { return nil } - pullRequestMaps := make(map[int64]*PullRequest, len(issuesIDs)) - left := len(issuesIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - rows, err := db.GetEngine(ctx). - In("issue_id", issuesIDs[:limit]). 
- Rows(new(PullRequest)) - if err != nil { - return err - } - - for rows.Next() { - var pr PullRequest - err = rows.Scan(&pr) - if err != nil { - if err1 := rows.Close(); err1 != nil { - return fmt.Errorf("IssueList.loadPullRequests: Close: %w", err1) - } - return err - } - pullRequestMaps[pr.IssueID] = &pr - } - if err1 := rows.Close(); err1 != nil { - return fmt.Errorf("IssueList.loadPullRequests: Close: %w", err1) - } - left -= limit - issuesIDs = issuesIDs[limit:] + pullRequestMaps, err := db.GetByIDs(ctx, "issue_id", issuesIDs, &PullRequest{}) + if err != nil { + return err } for _, issue := range issues { @@ -350,34 +286,10 @@ func (issues IssueList) LoadAttachments(ctx context.Context) (err error) { return nil } - attachments := make(map[int64][]*repo_model.Attachment, len(issues)) issuesIDs := issues.getIssueIDs() - left := len(issuesIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - rows, err := db.GetEngine(ctx). - In("issue_id", issuesIDs[:limit]). - Rows(new(repo_model.Attachment)) - if err != nil { - return err - } - - for rows.Next() { - var attachment repo_model.Attachment - err = rows.Scan(&attachment) - if err != nil { - if err1 := rows.Close(); err1 != nil { - return fmt.Errorf("IssueList.loadAttachments: Close: %w", err1) - } - return err - } - attachments[attachment.IssueID] = append(attachments[attachment.IssueID], &attachment) - } - if err1 := rows.Close(); err1 != nil { - return fmt.Errorf("IssueList.loadAttachments: Close: %w", err1) - } - left -= limit - issuesIDs = issuesIDs[limit:] + attachments, err := db.GetByFieldIn(ctx, "issue_id", issuesIDs, &repo_model.Attachment{}) + if err != nil { + return err } for _, issue := range issues { @@ -394,12 +306,10 @@ func (issues IssueList) loadComments(ctx context.Context, cond builder.Cond) (er comments := make(map[int64][]*Comment, len(issues)) issuesIDs := issues.getIssueIDs() - left := len(issuesIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) + for issueIDChunk := 
range slices.Chunk(issuesIDs, db.DefaultMaxInSize) { rows, err := db.GetEngine(ctx).Table("comment"). Join("INNER", "issue", "issue.id = comment.issue_id"). - In("issue.id", issuesIDs[:limit]). + In("issue.id", issueIDChunk). Where(cond). Rows(new(Comment)) if err != nil { @@ -420,8 +330,6 @@ func (issues IssueList) loadComments(ctx context.Context, cond builder.Cond) (er if err1 := rows.Close(); err1 != nil { return fmt.Errorf("IssueList.loadComments: Close: %w", err1) } - left -= limit - issuesIDs = issuesIDs[limit:] } for _, issue := range issues { @@ -457,15 +365,12 @@ func (issues IssueList) loadTotalTrackedTimes(ctx context.Context) (err error) { } } - left := len(ids) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - + for idChunk := range slices.Chunk(ids, db.DefaultMaxInSize) { // select issue_id, sum(time) from tracked_time where issue_id in () group by issue_id rows, err := db.GetEngine(ctx).Table("tracked_time"). Where("deleted = ?", false). Select("issue_id, sum(time) as time"). - In("issue_id", ids[:limit]). + In("issue_id", idChunk). GroupBy("issue_id"). 
Rows(new(totalTimesByIssue)) if err != nil { @@ -486,8 +391,6 @@ func (issues IssueList) loadTotalTrackedTimes(ctx context.Context) (err error) { if err1 := rows.Close(); err1 != nil { return fmt.Errorf("IssueList.loadTotalTrackedTimes: Close: %w", err1) } - left -= limit - ids = ids[limit:] } for _, issue := range issues { diff --git a/models/issues/reaction.go b/models/issues/reaction.go index 9a277a8c12..21975c6b00 100644 --- a/models/issues/reaction.go +++ b/models/issues/reaction.go @@ -7,6 +7,7 @@ import ( "bytes" "context" "fmt" + "slices" "forgejo.org/models/db" repo_model "forgejo.org/models/repo" @@ -178,13 +179,12 @@ func FindReactions(ctx context.Context, opts FindReactionsOptions) (ReactionList func getReactionsForComments(ctx context.Context, issueID int64, commentIDs []int64) (map[int64]ReactionList, error) { reactions := make(map[int64]ReactionList, len(commentIDs)) - left := len(commentIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) + + for commentIDChunk := range slices.Chunk(commentIDs, db.DefaultMaxInSize) { rows, err := db.GetEngine(ctx). Where(builder.Eq{"issue_id": issueID}). In("reaction.`type`", setting.UI.Reactions). - In("comment_id", commentIDs[:limit]). + In("comment_id", commentIDChunk). Rows(&Reaction{}) if err != nil { return nil, err @@ -201,8 +201,6 @@ func getReactionsForComments(ctx context.Context, issueID int64, commentIDs []in } _ = rows.Close() - left -= limit - commentIDs = commentIDs[limit:] } return reactions, nil } diff --git a/tests/integration/db_query_test.go b/tests/integration/db_query_test.go new file mode 100644 index 0000000000..799d3219e8 --- /dev/null +++ b/tests/integration/db_query_test.go @@ -0,0 +1,64 @@ +// Copyright 2026 The Forgejo Authors. All rights reserved. 
+// SPDX-License-Identifier: GPL-3.0-or-later + +package integration + +import ( + "fmt" + "testing" + + actions_model "forgejo.org/models/actions" + "forgejo.org/models/db" + "forgejo.org/tests" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// These are basically unit tests, but by running them in the integration test suite they are tested against all +// supported database types. + +func TestDatabaseDefaultMaxInSize(t *testing.T) { + defer tests.PrepareTestEnv(t)() + + // Ensure there are more than db.DefaultMaxInSize objects in a table: + targetCount := db.DefaultMaxInSize * 2 + for i := range targetCount { + _, err := actions_model.InsertVariable(t.Context(), 2, 2, fmt.Sprintf("VAR_%d", i), fmt.Sprintf("Value %d", i)) + require.NoError(t, err) + } + + t.Run("GetByIDs", func(t *testing.T) { + defer tests.PrintCurrentTest(t)() + + allActionVariables := make([]*actions_model.ActionVariable, 0, targetCount) + err := db.GetEngine(t.Context()).Find(&allActionVariables) + require.NoError(t, err) + + allIDs := make([]int64, len(allActionVariables)) + for i := range allActionVariables { + allIDs[i] = allActionVariables[i].ID + } + + allActionVariablesAgain, err := db.GetByIDs(t.Context(), "id", allIDs, &actions_model.ActionVariable{}) + require.NoError(t, err) + assert.Len(t, allActionVariablesAgain, len(allActionVariables)) + }) + + t.Run("GetByFieldIn", func(t *testing.T) { + defer tests.PrintCurrentTest(t)() + + allActionVariables := make([]*actions_model.ActionVariable, 0, targetCount) + err := db.GetEngine(t.Context()).Find(&allActionVariables) + require.NoError(t, err) + + allIDs := make([]int64, len(allActionVariables)) + for i := range allActionVariables { + allIDs[i] = allActionVariables[i].ID + } + + allActionVariablesAgain, err := db.GetByFieldIn(t.Context(), "id", allIDs, &actions_model.ActionVariable{}) + require.NoError(t, err) + assert.Len(t, allActionVariablesAgain, len(allActionVariables)) + }) +} From 
1176b58f28a73c0f78359919c2c6026d47163728 Mon Sep 17 00:00:00 2001 From: forgejo-backport-action Date: Sun, 5 Apr 2026 22:53:13 +0200 Subject: [PATCH 092/352] [v15.0/forgejo] refactor: reduce code duplication when accessing `DefaultMaxInSize` (#12000) **Backport:** https://codeberg.org/forgejo/forgejo/pulls/11999 `DefaultMaxInSize` is an internal parameter for limiting the size of `field IN (...)` clauses in DB queries, which is a reasonable thing to do -- in addition to the errors noted when [originally introduced](https://github.com/go-gitea/gitea/pull/4594), there are technical limits that apply to each of PostgreSQL, MySQL, and SQLite which would prevent an unbounded size for a query like this. However: the size is incredibly small at 50, and, the implementation of `DefaultMaxInSize` is really wasteful with copy-and-paste coding. This PR: - introduces `GetByIDs` which fetches a `map[int64]*Model` from the database for an array of ID values, while respecting `IN` clause size limits - introduces `GetByFieldIn` which fetches a `map[int64][]*Model` from the database for an array of field values, while respecting `IN` clause size limits - uses `slices.Chunk` for other locations where queries are too complex for these implementations - bumps the `DefaultMaxInSize` parameter from 50 to 500, a conservative increase well under known limits, but 10x the current value: - PostgreSQL supports up to 1GB query text size with 65,535 parameters, but I've experienced performance degradation at high value counts - MySQL supports 64MB query text size without known limits of parameter count - SQLite supports 32,766 parameters in a query ## Checklist The [contributor guide](https://forgejo.org/docs/next/contributor/) contains information that will be helpful to first time contributors. All work and communication must conform to Forgejo's [AI Agreement](https://codeberg.org/forgejo/governance/src/branch/main/AIAgreement.md). 
There also are a few [conditions for merging Pull Requests in Forgejo repositories](https://codeberg.org/forgejo/governance/src/branch/main/PullRequestsAgreement.md). You are also welcome to join the [Forgejo development chatroom](https://matrix.to/#/#forgejo-development:matrix.org). ### Tests for Go changes - I added test coverage for Go changes... - [x] in their respective `*_test.go` for unit tests. - Refactored functions are assumed to be covered by existing tests to some extent; that assumption is probably wrong but the changes here are relatively easily reviewed for correctness as well. - [ ] in the `tests/integration` directory if it involves interactions with a live Forgejo server. - I ran... - [x] `make pr-go` before pushing ### Documentation - [ ] I created a pull request [to the documentation](https://codeberg.org/forgejo/docs) to explain to Forgejo users how to use this change. - [x] I did not document these changes and I do not expect someone else to do it. ### Release notes - [ ] This change will be noticed by a Forgejo user or admin (feature, bug fix, performance, etc.). I suggest to include a release note for this change. - [x] This change is not visible to a Forgejo user or admin (refactor, dependency upgrade, etc.). I think there is no need to add a release note for this change. 
Co-authored-by: Mathieu Fenniak Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/12000 Reviewed-by: Mathieu Fenniak Co-authored-by: forgejo-backport-action Co-committed-by: forgejo-backport-action --- models/activities/notification_list.go | 112 ++--------------- models/db/context.go | 86 +++++++++++++ models/db/list.go | 2 +- models/issues/comment_list.go | 168 ++++--------------------- models/issues/issue_list.go | 151 ++++------------------ models/issues/reaction.go | 10 +- tests/integration/db_query_test.go | 64 ++++++++++ 7 files changed, 215 insertions(+), 378 deletions(-) create mode 100644 tests/integration/db_query_test.go diff --git a/models/activities/notification_list.go b/models/activities/notification_list.go index bf6356021e..3f3a48eaa5 100644 --- a/models/activities/notification_list.go +++ b/models/activities/notification_list.go @@ -210,31 +210,9 @@ func (nl NotificationList) LoadRepos(ctx context.Context) (repo_model.Repository } repoIDs := nl.getPendingRepoIDs() - repos := make(map[int64]*repo_model.Repository, len(repoIDs)) - left := len(repoIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - rows, err := db.GetEngine(ctx). - In("id", repoIDs[:limit]). - Rows(new(repo_model.Repository)) - if err != nil { - return nil, nil, err - } - - for rows.Next() { - var repo repo_model.Repository - err = rows.Scan(&repo) - if err != nil { - rows.Close() - return nil, nil, err - } - - repos[repo.ID] = &repo - } - _ = rows.Close() - - left -= limit - repoIDs = repoIDs[limit:] + repos, err := db.GetByIDs(ctx, "id", repoIDs, &repo_model.Repository{}) + if err != nil { + return nil, nil, err } failed := []int{} @@ -281,31 +259,9 @@ func (nl NotificationList) LoadIssues(ctx context.Context) ([]int, error) { } issueIDs := nl.getPendingIssueIDs() - issues := make(map[int64]*issues_model.Issue, len(issueIDs)) - left := len(issueIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - rows, err := db.GetEngine(ctx). 
- In("id", issueIDs[:limit]). - Rows(new(issues_model.Issue)) - if err != nil { - return nil, err - } - - for rows.Next() { - var issue issues_model.Issue - err = rows.Scan(&issue) - if err != nil { - rows.Close() - return nil, err - } - - issues[issue.ID] = &issue - } - _ = rows.Close() - - left -= limit - issueIDs = issueIDs[limit:] + issues, err := db.GetByIDs(ctx, "id", issueIDs, &issues_model.Issue{}) + if err != nil { + return nil, err } failures := []int{} @@ -373,31 +329,9 @@ func (nl NotificationList) LoadUsers(ctx context.Context) ([]int, error) { } userIDs := nl.getUserIDs() - users := make(map[int64]*user_model.User, len(userIDs)) - left := len(userIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - rows, err := db.GetEngine(ctx). - In("id", userIDs[:limit]). - Rows(new(user_model.User)) - if err != nil { - return nil, err - } - - for rows.Next() { - var user user_model.User - err = rows.Scan(&user) - if err != nil { - rows.Close() - return nil, err - } - - users[user.ID] = &user - } - _ = rows.Close() - - left -= limit - userIDs = userIDs[limit:] + users, err := db.GetByIDs(ctx, "id", userIDs, &user_model.User{}) + if err != nil { + return nil, err } failures := []int{} @@ -421,31 +355,9 @@ func (nl NotificationList) LoadComments(ctx context.Context) ([]int, error) { } commentIDs := nl.getPendingCommentIDs() - comments := make(map[int64]*issues_model.Comment, len(commentIDs)) - left := len(commentIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - rows, err := db.GetEngine(ctx). - In("id", commentIDs[:limit]). 
- Rows(new(issues_model.Comment)) - if err != nil { - return nil, err - } - - for rows.Next() { - var comment issues_model.Comment - err = rows.Scan(&comment) - if err != nil { - rows.Close() - return nil, err - } - - comments[comment.ID] = &comment - } - _ = rows.Close() - - left -= limit - commentIDs = commentIDs[limit:] + comments, err := db.GetByIDs(ctx, "id", commentIDs, &issues_model.Comment{}) + if err != nil { + return nil, err } failures := []int{} diff --git a/models/db/context.go b/models/db/context.go index f098b40a32..18237bb2a2 100644 --- a/models/db/context.go +++ b/models/db/context.go @@ -8,6 +8,7 @@ import ( "database/sql" "errors" "fmt" + "slices" "xorm.io/builder" "xorm.io/xorm" @@ -270,6 +271,91 @@ func GetByID[T any](ctx context.Context, id int64) (object *T, exist bool, err e return &bean, true, nil } +// Retrieves multiple objects with database queries similar to an xorm `.In(idField, idList)`. idField must be a unique +// field on the database table, as a map[id]obj is returned and the usage of a non-unique field would result in objects +// being overwritten in the map. +// +// The length of the IN list is constrained to DefaultMaxInSize for each database query, resulting in multiple database +// queries if the length of the idList exceeds that setting; this constraint prevents exceeding bind parameter +// limitations or query length limitations in the database engine. 
+func GetByIDs[Bean any, Id comparable](ctx context.Context, idField string, idList []Id, bean *Bean) (map[Id]*Bean, error) { + retval := make(map[Id]*Bean, len(idList)) + if len(idList) == 0 { + return retval, nil + } + + table, err := TableInfo(bean) + if err != nil { + return nil, fmt.Errorf("unable to fetch table info for bean %v: %w", bean, err) + } + + var structFieldName string + for _, c := range table.Columns() { + if c.Name == idField { + structFieldName = c.FieldName + break + } + } + if structFieldName == "" { + return nil, fmt.Errorf("unable to identify struct field for id field %s", idField) + } + + for idChunk := range slices.Chunk(idList, DefaultMaxInSize) { + beans := make([]*Bean, 0, len(idChunk)) + if err := GetEngine(ctx).In(idField, idChunk).Find(&beans); err != nil { + return nil, err + } + for _, bean := range beans { + retval[extractFieldValue(bean, structFieldName).(Id)] = bean + } + } + + return retval, nil +} + +// Retrieves multiple objects with database queries similar to an xorm `.In(field, valueList)`. Similar to GetByIDs, +// except that a map[Id][]*Bean is returned as the field value is not assumed to be a unique value -- if there are +// multiple rows in the table for each value, all of them are returned. +// +// The length of the IN list is constrained to DefaultMaxInSize for each database query, resulting in multiple database +// queries if the length of the idList exceeds that setting; this constraint prevents exceeding bind parameter +// limitations or query length limitations in the database engine. 
+func GetByFieldIn[Bean any, Id comparable](ctx context.Context, field string, valueList []Id, bean *Bean) (map[Id][]*Bean, error) { + retval := make(map[Id][]*Bean, len(valueList)) + if len(valueList) == 0 { + return retval, nil + } + + table, err := TableInfo(bean) + if err != nil { + return nil, fmt.Errorf("unable to fetch table info for bean %v: %w", bean, err) + } + + var structFieldName string + for _, c := range table.Columns() { + if c.Name == field { + structFieldName = c.FieldName + break + } + } + if structFieldName == "" { + return nil, fmt.Errorf("unable to identify struct field for field %s", field) + } + + for idChunk := range slices.Chunk(valueList, DefaultMaxInSize) { + beans := make([]*Bean, 0, len(idChunk)) + if err := GetEngine(ctx).In(field, idChunk).Find(&beans); err != nil { + return nil, err + } + for _, bean := range beans { + fieldValue := extractFieldValue(bean, structFieldName).(Id) + retval[fieldValue] = append(retval[fieldValue], bean) + } + } + + return retval, nil +} + func Exist[T any](ctx context.Context, cond builder.Cond) (bool, error) { if !cond.IsValid() { panic("cond is invalid in db.Exist(ctx, cond). This should not be possible.") diff --git a/models/db/list.go b/models/db/list.go index 057221936c..71e9a0b1d2 100644 --- a/models/db/list.go +++ b/models/db/list.go @@ -14,7 +14,7 @@ import ( const ( // DefaultMaxInSize represents default variables number on IN () in SQL - DefaultMaxInSize = 50 + DefaultMaxInSize = 500 defaultFindSliceSize = 10 ) diff --git a/models/issues/comment_list.go b/models/issues/comment_list.go index 9a5c22244b..b218f11dfa 100644 --- a/models/issues/comment_list.go +++ b/models/issues/comment_list.go @@ -52,29 +52,9 @@ func (comments CommentList) loadLabels(ctx context.Context) error { } labelIDs := comments.getLabelIDs() - commentLabels := make(map[int64]*Label, len(labelIDs)) - left := len(labelIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - rows, err := db.GetEngine(ctx). 
- In("id", labelIDs[:limit]). - Rows(new(Label)) - if err != nil { - return err - } - - for rows.Next() { - var label Label - err = rows.Scan(&label) - if err != nil { - _ = rows.Close() - return err - } - commentLabels[label.ID] = &label - } - _ = rows.Close() - left -= limit - labelIDs = labelIDs[limit:] + commentLabels, err := db.GetByIDs(ctx, "id", labelIDs, &Label{}) + if err != nil { + return err } for _, comment := range comments { @@ -99,18 +79,9 @@ func (comments CommentList) loadMilestones(ctx context.Context) error { return nil } - milestones := make(map[int64]*Milestone, len(milestoneIDs)) - left := len(milestoneIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - err := db.GetEngine(ctx). - In("id", milestoneIDs[:limit]). - Find(&milestones) - if err != nil { - return err - } - left -= limit - milestoneIDs = milestoneIDs[limit:] + milestones, err := db.GetByIDs(ctx, "id", milestoneIDs, &Milestone{}) + if err != nil { + return err } for _, comment := range comments { @@ -135,18 +106,9 @@ func (comments CommentList) loadOldMilestones(ctx context.Context) error { return nil } - milestones := make(map[int64]*Milestone, len(milestoneIDs)) - left := len(milestoneIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - err := db.GetEngine(ctx). - In("id", milestoneIDs[:limit]). - Find(&milestones) - if err != nil { - return err - } - left -= limit - milestoneIDs = milestoneIDs[limit:] + milestones, err := db.GetByIDs(ctx, "id", milestoneIDs, &Milestone{}) + if err != nil { + return err } for _, comment := range comments { @@ -167,31 +129,9 @@ func (comments CommentList) loadAssignees(ctx context.Context) error { } assigneeIDs := comments.getAssigneeIDs() - assignees := make(map[int64]*user_model.User, len(assigneeIDs)) - left := len(assigneeIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - rows, err := db.GetEngine(ctx). - In("id", assigneeIDs[:limit]). 
- Rows(new(user_model.User)) - if err != nil { - return err - } - - for rows.Next() { - var user user_model.User - err = rows.Scan(&user) - if err != nil { - rows.Close() - return err - } - - assignees[user.ID] = &user - } - _ = rows.Close() - - left -= limit - assigneeIDs = assigneeIDs[limit:] + assignees, err := db.GetByIDs(ctx, "id", assigneeIDs, &user_model.User{}) + if err != nil { + return err } for _, comment := range comments { @@ -232,31 +172,9 @@ func (comments CommentList) LoadIssues(ctx context.Context) error { } issueIDs := comments.getIssueIDs() - issues := make(map[int64]*Issue, len(issueIDs)) - left := len(issueIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - rows, err := db.GetEngine(ctx). - In("id", issueIDs[:limit]). - Rows(new(Issue)) - if err != nil { - return err - } - - for rows.Next() { - var issue Issue - err = rows.Scan(&issue) - if err != nil { - rows.Close() - return err - } - - issues[issue.ID] = &issue - } - _ = rows.Close() - - left -= limit - issueIDs = issueIDs[limit:] + issues, err := db.GetByIDs(ctx, "id", issueIDs, &Issue{}) + if err != nil { + return err } for _, comment := range comments { @@ -281,33 +199,10 @@ func (comments CommentList) loadDependentIssues(ctx context.Context) error { return nil } - e := db.GetEngine(ctx) issueIDs := comments.getDependentIssueIDs() - issues := make(map[int64]*Issue, len(issueIDs)) - left := len(issueIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - rows, err := e. - In("id", issueIDs[:limit]). 
- Rows(new(Issue)) - if err != nil { - return err - } - - for rows.Next() { - var issue Issue - err = rows.Scan(&issue) - if err != nil { - _ = rows.Close() - return err - } - - issues[issue.ID] = &issue - } - _ = rows.Close() - - left -= limit - issueIDs = issueIDs[limit:] + issues, err := db.GetByIDs(ctx, "id", issueIDs, &Issue{}) + if err != nil { + return err } for _, comment := range comments { @@ -358,31 +253,10 @@ func (comments CommentList) LoadAttachments(ctx context.Context) (err error) { return nil } - attachments := make(map[int64][]*repo_model.Attachment, len(comments)) commentsIDs := comments.getAttachmentCommentIDs() - left := len(commentsIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - rows, err := db.GetEngine(ctx). - In("comment_id", commentsIDs[:limit]). - Rows(new(repo_model.Attachment)) - if err != nil { - return err - } - - for rows.Next() { - var attachment repo_model.Attachment - err = rows.Scan(&attachment) - if err != nil { - _ = rows.Close() - return err - } - attachments[attachment.CommentID] = append(attachments[attachment.CommentID], &attachment) - } - - _ = rows.Close() - left -= limit - commentsIDs = commentsIDs[limit:] + attachments, err := db.GetByFieldIn(ctx, "comment_id", commentsIDs, &repo_model.Attachment{}) + if err != nil { + return err } for _, comment := range comments { diff --git a/models/issues/issue_list.go b/models/issues/issue_list.go index 34cfe35475..e4fd9eef2b 100644 --- a/models/issues/issue_list.go +++ b/models/issues/issue_list.go @@ -6,6 +6,7 @@ package issues import ( "context" "fmt" + "slices" "forgejo.org/models/db" project_model "forgejo.org/models/project" @@ -40,18 +41,9 @@ func (issues IssueList) LoadRepositories(ctx context.Context) (repo_model.Reposi } repoIDs := issues.getRepoIDs() - repoMaps := make(map[int64]*repo_model.Repository, len(repoIDs)) - left := len(repoIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - err := db.GetEngine(ctx). - In("id", repoIDs[:limit]). 
- Find(&repoMaps) - if err != nil { - return nil, fmt.Errorf("find repository: %w", err) - } - left -= limit - repoIDs = repoIDs[limit:] + repoMaps, err := db.GetByIDs(ctx, "id", repoIDs, &repo_model.Repository{}) + if err != nil { + return nil, fmt.Errorf("find repository: %w", err) } for _, issue := range issues { @@ -93,18 +85,9 @@ func (issues IssueList) LoadPosters(ctx context.Context) error { } func getPostersByIDs(ctx context.Context, posterIDs []int64) (map[int64]*user_model.User, error) { - posterMaps := make(map[int64]*user_model.User, len(posterIDs)) - left := len(posterIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - err := db.GetEngine(ctx). - In("id", posterIDs[:limit]). - Find(&posterMaps) - if err != nil { - return nil, err - } - left -= limit - posterIDs = posterIDs[limit:] + posterMaps, err := db.GetByIDs(ctx, "id", posterIDs, &user_model.User{}) + if err != nil { + return nil, err } return posterMaps, nil } @@ -129,18 +112,15 @@ func (issues IssueList) LoadLabels(ctx context.Context) error { issueLabels := make(map[int64][]*Label, len(issues)*3) issueIDs := issues.getIssueIDs() - left := len(issueIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) + for issueIDChunk := range slices.Chunk(issueIDs, db.DefaultMaxInSize) { rows, err := db.GetEngine(ctx).Table("label"). Join("LEFT", "issue_label", "issue_label.label_id = label.id"). - In("issue_label.issue_id", issueIDs[:limit]). + In("issue_label.issue_id", issueIDChunk). Asc("label.name"). 
Rows(new(LabelIssue)) if err != nil { return err } - for rows.Next() { var labelIssue LabelIssue err = rows.Scan(&labelIssue) @@ -157,8 +137,6 @@ func (issues IssueList) LoadLabels(ctx context.Context) error { if err1 := rows.Close(); err1 != nil { return fmt.Errorf("IssueList.LoadLabels: Close: %w", err1) } - left -= limit - issueIDs = issueIDs[limit:] } for _, issue := range issues { @@ -180,18 +158,9 @@ func (issues IssueList) LoadMilestones(ctx context.Context) error { return nil } - milestoneMaps := make(map[int64]*Milestone, len(milestoneIDs)) - left := len(milestoneIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - err := db.GetEngine(ctx). - In("id", milestoneIDs[:limit]). - Find(&milestoneMaps) - if err != nil { - return err - } - left -= limit - milestoneIDs = milestoneIDs[limit:] + milestoneMaps, err := db.GetByIDs(ctx, "id", milestoneIDs, &Milestone{}) + if err != nil { + return err } for _, issue := range issues { @@ -204,22 +173,19 @@ func (issues IssueList) LoadMilestones(ctx context.Context) error { func (issues IssueList) LoadProjects(ctx context.Context) error { issueIDs := issues.getIssueIDs() projectMaps := make(map[int64]*project_model.Project, len(issues)) - left := len(issueIDs) type projectWithIssueID struct { *project_model.Project `xorm:"extends"` IssueID int64 } - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - - projects := make([]*projectWithIssueID, 0, limit) + for issueIDChunk := range slices.Chunk(issueIDs, db.DefaultMaxInSize) { + projects := make([]*projectWithIssueID, 0, len(issueIDChunk)) err := db.GetEngine(ctx). Table("project"). Select("project.*, project_issue.issue_id"). Join("INNER", "project_issue", "project.id = project_issue.project_id"). - In("project_issue.issue_id", issueIDs[:limit]). + In("project_issue.issue_id", issueIDChunk). 
Find(&projects) if err != nil { return err @@ -227,8 +193,6 @@ func (issues IssueList) LoadProjects(ctx context.Context) error { for _, project := range projects { projectMaps[project.IssueID] = project.Project } - left -= limit - issueIDs = issueIDs[limit:] } for _, issue := range issues { @@ -249,12 +213,10 @@ func (issues IssueList) LoadAssignees(ctx context.Context) error { assignees := make(map[int64][]*user_model.User, len(issues)) issueIDs := issues.getIssueIDs() - left := len(issueIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) + for issueIDChunk := range slices.Chunk(issueIDs, db.DefaultMaxInSize) { rows, err := db.GetEngine(ctx).Table("issue_assignees"). Join("INNER", "`user`", "`user`.id = `issue_assignees`.assignee_id"). - In("`issue_assignees`.issue_id", issueIDs[:limit]).OrderBy(user_model.GetOrderByName()). + In("`issue_assignees`.issue_id", issueIDChunk).OrderBy(user_model.GetOrderByName()). Rows(new(AssigneeIssue)) if err != nil { return err @@ -275,8 +237,6 @@ func (issues IssueList) LoadAssignees(ctx context.Context) error { if err1 := rows.Close(); err1 != nil { return fmt.Errorf("IssueList.loadAssignees: Close: %w", err1) } - left -= limit - issueIDs = issueIDs[limit:] } for _, issue := range issues { @@ -306,33 +266,9 @@ func (issues IssueList) LoadPullRequests(ctx context.Context) error { return nil } - pullRequestMaps := make(map[int64]*PullRequest, len(issuesIDs)) - left := len(issuesIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - rows, err := db.GetEngine(ctx). - In("issue_id", issuesIDs[:limit]). 
- Rows(new(PullRequest)) - if err != nil { - return err - } - - for rows.Next() { - var pr PullRequest - err = rows.Scan(&pr) - if err != nil { - if err1 := rows.Close(); err1 != nil { - return fmt.Errorf("IssueList.loadPullRequests: Close: %w", err1) - } - return err - } - pullRequestMaps[pr.IssueID] = &pr - } - if err1 := rows.Close(); err1 != nil { - return fmt.Errorf("IssueList.loadPullRequests: Close: %w", err1) - } - left -= limit - issuesIDs = issuesIDs[limit:] + pullRequestMaps, err := db.GetByIDs(ctx, "issue_id", issuesIDs, &PullRequest{}) + if err != nil { + return err } for _, issue := range issues { @@ -350,34 +286,10 @@ func (issues IssueList) LoadAttachments(ctx context.Context) (err error) { return nil } - attachments := make(map[int64][]*repo_model.Attachment, len(issues)) issuesIDs := issues.getIssueIDs() - left := len(issuesIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - rows, err := db.GetEngine(ctx). - In("issue_id", issuesIDs[:limit]). - Rows(new(repo_model.Attachment)) - if err != nil { - return err - } - - for rows.Next() { - var attachment repo_model.Attachment - err = rows.Scan(&attachment) - if err != nil { - if err1 := rows.Close(); err1 != nil { - return fmt.Errorf("IssueList.loadAttachments: Close: %w", err1) - } - return err - } - attachments[attachment.IssueID] = append(attachments[attachment.IssueID], &attachment) - } - if err1 := rows.Close(); err1 != nil { - return fmt.Errorf("IssueList.loadAttachments: Close: %w", err1) - } - left -= limit - issuesIDs = issuesIDs[limit:] + attachments, err := db.GetByFieldIn(ctx, "issue_id", issuesIDs, &repo_model.Attachment{}) + if err != nil { + return err } for _, issue := range issues { @@ -394,12 +306,10 @@ func (issues IssueList) loadComments(ctx context.Context, cond builder.Cond) (er comments := make(map[int64][]*Comment, len(issues)) issuesIDs := issues.getIssueIDs() - left := len(issuesIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) + for issueIDChunk := 
range slices.Chunk(issuesIDs, db.DefaultMaxInSize) { rows, err := db.GetEngine(ctx).Table("comment"). Join("INNER", "issue", "issue.id = comment.issue_id"). - In("issue.id", issuesIDs[:limit]). + In("issue.id", issueIDChunk). Where(cond). Rows(new(Comment)) if err != nil { @@ -420,8 +330,6 @@ func (issues IssueList) loadComments(ctx context.Context, cond builder.Cond) (er if err1 := rows.Close(); err1 != nil { return fmt.Errorf("IssueList.loadComments: Close: %w", err1) } - left -= limit - issuesIDs = issuesIDs[limit:] } for _, issue := range issues { @@ -457,15 +365,12 @@ func (issues IssueList) loadTotalTrackedTimes(ctx context.Context) (err error) { } } - left := len(ids) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) - + for idChunk := range slices.Chunk(ids, db.DefaultMaxInSize) { // select issue_id, sum(time) from tracked_time where issue_id in () group by issue_id rows, err := db.GetEngine(ctx).Table("tracked_time"). Where("deleted = ?", false). Select("issue_id, sum(time) as time"). - In("issue_id", ids[:limit]). + In("issue_id", idChunk). GroupBy("issue_id"). 
Rows(new(totalTimesByIssue)) if err != nil { @@ -486,8 +391,6 @@ func (issues IssueList) loadTotalTrackedTimes(ctx context.Context) (err error) { if err1 := rows.Close(); err1 != nil { return fmt.Errorf("IssueList.loadTotalTrackedTimes: Close: %w", err1) } - left -= limit - ids = ids[limit:] } for _, issue := range issues { diff --git a/models/issues/reaction.go b/models/issues/reaction.go index 9a277a8c12..21975c6b00 100644 --- a/models/issues/reaction.go +++ b/models/issues/reaction.go @@ -7,6 +7,7 @@ import ( "bytes" "context" "fmt" + "slices" "forgejo.org/models/db" repo_model "forgejo.org/models/repo" @@ -178,13 +179,12 @@ func FindReactions(ctx context.Context, opts FindReactionsOptions) (ReactionList func getReactionsForComments(ctx context.Context, issueID int64, commentIDs []int64) (map[int64]ReactionList, error) { reactions := make(map[int64]ReactionList, len(commentIDs)) - left := len(commentIDs) - for left > 0 { - limit := min(left, db.DefaultMaxInSize) + + for commentIDChunk := range slices.Chunk(commentIDs, db.DefaultMaxInSize) { rows, err := db.GetEngine(ctx). Where(builder.Eq{"issue_id": issueID}). In("reaction.`type`", setting.UI.Reactions). - In("comment_id", commentIDs[:limit]). + In("comment_id", commentIDChunk). Rows(&Reaction{}) if err != nil { return nil, err @@ -201,8 +201,6 @@ func getReactionsForComments(ctx context.Context, issueID int64, commentIDs []in } _ = rows.Close() - left -= limit - commentIDs = commentIDs[limit:] } return reactions, nil } diff --git a/tests/integration/db_query_test.go b/tests/integration/db_query_test.go new file mode 100644 index 0000000000..799d3219e8 --- /dev/null +++ b/tests/integration/db_query_test.go @@ -0,0 +1,64 @@ +// Copyright 2026 The Forgejo Authors. All rights reserved. 
+// SPDX-License-Identifier: GPL-3.0-or-later + +package integration + +import ( + "fmt" + "testing" + + actions_model "forgejo.org/models/actions" + "forgejo.org/models/db" + "forgejo.org/tests" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// These are basically unit tests, but by running them in the integration test suite they are tested against all +// supported database types. + +func TestDatabaseDefaultMaxInSize(t *testing.T) { + defer tests.PrepareTestEnv(t)() + + // Ensure there are more than db.DefaultMaxInSize objects in a table: + targetCount := db.DefaultMaxInSize * 2 + for i := range targetCount { + _, err := actions_model.InsertVariable(t.Context(), 2, 2, fmt.Sprintf("VAR_%d", i), fmt.Sprintf("Value %d", i)) + require.NoError(t, err) + } + + t.Run("GetByIDs", func(t *testing.T) { + defer tests.PrintCurrentTest(t)() + + allActionVariables := make([]*actions_model.ActionVariable, 0, targetCount) + err := db.GetEngine(t.Context()).Find(&allActionVariables) + require.NoError(t, err) + + allIDs := make([]int64, len(allActionVariables)) + for i := range allActionVariables { + allIDs[i] = allActionVariables[i].ID + } + + allActionVariablesAgain, err := db.GetByIDs(t.Context(), "id", allIDs, &actions_model.ActionVariable{}) + require.NoError(t, err) + assert.Len(t, allActionVariablesAgain, len(allActionVariables)) + }) + + t.Run("GetByFieldIn", func(t *testing.T) { + defer tests.PrintCurrentTest(t)() + + allActionVariables := make([]*actions_model.ActionVariable, 0, targetCount) + err := db.GetEngine(t.Context()).Find(&allActionVariables) + require.NoError(t, err) + + allIDs := make([]int64, len(allActionVariables)) + for i := range allActionVariables { + allIDs[i] = allActionVariables[i].ID + } + + allActionVariablesAgain, err := db.GetByFieldIn(t.Context(), "id", allIDs, &actions_model.ActionVariable{}) + require.NoError(t, err) + assert.Len(t, allActionVariablesAgain, len(allActionVariables)) + }) +} From 
d4f7b536bc11515feb553de0dd29e9020f20879b Mon Sep 17 00:00:00 2001 From: forgejo-backport-action Date: Mon, 6 Apr 2026 02:44:05 +0200 Subject: [PATCH 093/352] [v15.0/forgejo] fix: missing syntax dialog rounded corners (#11987) **Backport:** https://codeberg.org/forgejo/forgejo/pulls/11945 Fixes #11299 Co-authored-by: grangelouis Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/11987 Reviewed-by: 0ko <0ko@noreply.codeberg.org> Co-authored-by: forgejo-backport-action Co-committed-by: forgejo-backport-action --- web_src/css/modules/dialog.css | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/web_src/css/modules/dialog.css b/web_src/css/modules/dialog.css index 897eb838cf..3f9a1a44bf 100644 --- a/web_src/css/modules/dialog.css +++ b/web_src/css/modules/dialog.css @@ -88,6 +88,11 @@ dialog .actions { padding: 1rem; } +dialog footer { + border-bottom-left-radius: var(--border-radius); + border-bottom-right-radius: var(--border-radius); +} + /* positive/negative action buttons */ dialog .actions .ui.button { display: inline-flex; From a4ec71067c817d9db0605b6a2f99ebbbcaf80580 Mon Sep 17 00:00:00 2001 From: Mathieu Fenniak Date: Mon, 6 Apr 2026 02:45:39 +0200 Subject: [PATCH 094/352] [v15.0/forgejo] chore(deps): bump xorm to v1.3.9-forgejo.10 (#11992) (#11996) Backport #11992. As I'm intending to fix the intermittently failing test (#11968), I'd like to backport that so we don't have an intermittent failing test in the LTS, and this is a requirement. Brings [deadlock error type](https://code.forgejo.org/xorm/xorm/pulls/95), which should allow fixing #11968. 
(cherry picked from commit 15b4c5efe81ba47b6415a9c188981813aec7f775) Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/11992 Reviewed-by: Andreas Ahlenstorf Co-authored-by: Mathieu Fenniak Co-committed-by: Mathieu Fenniak Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/11996 Reviewed-by: Otto --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index b37a7eb424..1bc6acec46 100644 --- a/go.mod +++ b/go.mod @@ -75,7 +75,7 @@ require ( github.com/klauspost/cpuid/v2 v2.2.11 github.com/markbates/goth v1.82.0 github.com/mattn/go-isatty v0.0.20 - github.com/mattn/go-sqlite3 v1.14.37 + github.com/mattn/go-sqlite3 v1.14.40 github.com/meilisearch/meilisearch-go v0.36.0 github.com/mholt/archives v0.1.5 github.com/microcosm-cc/bluemonday v1.0.27 @@ -274,4 +274,4 @@ replace github.com/gliderlabs/ssh => code.forgejo.org/forgejo/ssh v0.0.0-2024121 replace git.sr.ht/~mariusor/go-xsd-duration => code.forgejo.org/forgejo/go-xsd-duration v0.0.0-20220703122237-02e73435a078 -replace xorm.io/xorm v1.3.9 => code.forgejo.org/xorm/xorm v1.3.9-forgejo.9 +replace xorm.io/xorm v1.3.9 => code.forgejo.org/xorm/xorm v1.3.9-forgejo.10 diff --git a/go.sum b/go.sum index 76e13223a8..54ee91755c 100644 --- a/go.sum +++ b/go.sum @@ -42,8 +42,8 @@ code.forgejo.org/go-chi/captcha v1.0.2 h1:vyHDPXkpjDv8bLO9NqtWzZayzstD/WpJ5xwEkA code.forgejo.org/go-chi/captcha v1.0.2/go.mod h1:lxiPLcJ76UCZHoH31/Wbum4GUi2NgjfFZLrJkKv1lLE= code.forgejo.org/go-chi/session v1.0.3 h1:ByJ9c/UC0AU57hxiGl53TXh+NdBOBwK/bhZ9jyadEwE= code.forgejo.org/go-chi/session v1.0.3/go.mod h1:xzGtFrV/agCJoZCUhFDlqAr1he6BrAdqlaprKOB1W90= -code.forgejo.org/xorm/xorm v1.3.9-forgejo.9 h1:hzEXDa53opdp5nrGG4F6y8HzFzrGXd5GIvFyUHcvGmI= -code.forgejo.org/xorm/xorm v1.3.9-forgejo.9/go.mod h1:A7sFd3BFmRp20h6drSsCXgQRQdF8Vz8HuCSrzFS3m90= +code.forgejo.org/xorm/xorm v1.3.9-forgejo.10 h1:DCProZz7GP10ue7NoVr1vreuADhH9tEImYFye2+aDG8= +code.forgejo.org/xorm/xorm 
v1.3.9-forgejo.10/go.mod h1:ly5tUt9l3b+y7HdXDM1UucQXuS58ahNxB9tPM5/6LfM= code.gitea.io/sdk/gitea v0.21.0 h1:69n6oz6kEVHRo1+APQQyizkhrZrLsTLXey9142pfkD4= code.gitea.io/sdk/gitea v0.21.0/go.mod h1:tnBjVhuKJCn8ibdyyhvUyxrR1Ca2KHEoTWoukNhXQPA= code.superseriousbusiness.org/exif-terminator v0.11.1 h1:qnujLH4/Yk/CFtFMmtjozbdV6Ry5G3Q/E/mLlWm/gQI= @@ -520,8 +520,8 @@ github.com/mattn/go-runewidth v0.0.17 h1:78v8ZlW0bP43XfmAfPsdXcoNCelfMHsDmd/pkEN github.com/mattn/go-runewidth v0.0.17/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= -github.com/mattn/go-sqlite3 v1.14.37 h1:3DOZp4cXis1cUIpCfXLtmlGolNLp2VEqhiB/PARNBIg= -github.com/mattn/go-sqlite3 v1.14.37/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.40 h1:f7+saIsbq4EF86mUqe0uiecQOJYMOdfi5uATADmUG94= +github.com/mattn/go-sqlite3 v1.14.40/go.mod h1:pjEuOr8IwzLJP2MfGeTb0A35jauH+C2kbHKBr7yXKVQ= github.com/meilisearch/meilisearch-go v0.36.0 h1:N1etykTektXt5KPcSbhBO0d5Xx5NaKj4pJWEM7WA5dI= github.com/meilisearch/meilisearch-go v0.36.0/go.mod h1:HBfHzKMxcSbTOvqdfuRA/yf6Vk9IivcwKocWRuW7W78= github.com/mholt/acmez/v3 v3.1.2 h1:auob8J/0FhmdClQicvJvuDavgd5ezwLBfKuYmynhYzc= From 825c2a174491ec4dda4146f8c6f7b1cbff76d170 Mon Sep 17 00:00:00 2001 From: forgejo-backport-action Date: Mon, 6 Apr 2026 03:39:53 +0200 Subject: [PATCH 095/352] [v15.0/forgejo] test: fix intermittent test failure in TestPackageDebianConcurrent (#11998) **Backport:** https://codeberg.org/forgejo/forgejo/pulls/11997 Fixes #11968. Adds deadlocks to the package `RetryTx` operations, and bumps the attempt count to 3. 
Technically this affects production code, not just test code, but the resulting failure is only likely to occur in highly concurrent operations when uploading packages to the debian registry for the first time for a user, which is more of a test artifact than a production likelihood. Manually tested by modifying the `Makefile` to add the `-test.count=25` option to the test command. This failed consistently on my dev system before this change, failed consistently after the deadlock err was added, and then succeeded consistently (multiple runs) after both changes were combined, giving me confidence that the intermittent failure is squashed. ## Checklist The [contributor guide](https://forgejo.org/docs/next/contributor/) contains information that will be helpful to first time contributors. All work and communication must conform to Forgejo's [AI Agreement](https://codeberg.org/forgejo/governance/src/branch/main/AIAgreement.md). There also are a few [conditions for merging Pull Requests in Forgejo repositories](https://codeberg.org/forgejo/governance/src/branch/main/PullRequestsAgreement.md). You are also welcome to join the [Forgejo development chatroom](https://matrix.to/#/#forgejo-development:matrix.org). ### Tests for Go changes - I added test coverage for Go changes... - [ ] in their respective `*_test.go` for unit tests. - [x] in the `tests/integration` directory if it involves interactions with a live Forgejo server. - Fixing a test failure, so no new tests added, but they already failed. - I ran... - [x] `make pr-go` before pushing ### Documentation - [ ] I created a pull request [to the documentation](https://codeberg.org/forgejo/docs) to explain to Forgejo users how to use this change. - [x] I did not document these changes and I do not expect someone else to do it. ### Release notes - [ ] This change will be noticed by a Forgejo user or admin (feature, bug fix, performance, etc.). I suggest to include a release note for this change. 
- [x] This change is not visible to a Forgejo user or admin (refactor, dependency upgrade, etc.). I think there is no need to add a release note for this change. Co-authored-by: Mathieu Fenniak Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/11998 Reviewed-by: Mathieu Fenniak Co-authored-by: forgejo-backport-action Co-committed-by: forgejo-backport-action --- services/packages/debian/repository.go | 7 ++++--- services/packages/packages.go | 21 ++++++++++++--------- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/services/packages/debian/repository.go b/services/packages/debian/repository.go index ab8c4fdc45..e84ae45ebe 100644 --- a/services/packages/debian/repository.go +++ b/services/packages/debian/repository.go @@ -318,9 +318,10 @@ func buildReleaseFiles(ctx context.Context, ownerID int64, repoVersion *packages // way. var priv string err = db.RetryTx(ctx, db.RetryConfig{ - // A single retry is sufficient the user/org's key pair would have been created by the first successful tx. - AttemptCount: 2, - ErrorIs: []error{xorm.ErrUniqueConstraintViolation}, + // A single retry is sufficient the user/org's key pair would have been created by the first successful tx; an + // additional retry may be necessary if a deadlock occurs with concurrent updates. + AttemptCount: 3, + ErrorIs: []error{xorm.ErrUniqueConstraintViolation, xorm.ErrDeadlock}, }, func(ctx context.Context) error { priv, _, err = GetOrCreateKeyPair(ctx, ownerID) return err diff --git a/services/packages/packages.go b/services/packages/packages.go index a1772cc1d9..c75b4559c8 100644 --- a/services/packages/packages.go +++ b/services/packages/packages.go @@ -95,9 +95,10 @@ func createPackageAndAddFile(ctx context.Context, pvci *PackageCreationInfo, pfc // causes such a recovery from error to panic. So, we retry the entire modification transaction if // ErrUniqueConstraintViolation is encountered. 
err := db.RetryTx(ctx, db.RetryConfig{ - // A single retry is sufficient as any package index that was concurrently modified should now be present: - AttemptCount: 2, - ErrorIs: []error{xorm.ErrUniqueConstraintViolation}, + // A single retry is sufficient as any package index that was concurrently modified should now be present; an + // additional retry may be necessary if a deadlock occurs with concurrent updates. + AttemptCount: 3, + ErrorIs: []error{xorm.ErrUniqueConstraintViolation, xorm.ErrDeadlock}, }, func(ctx context.Context) error { var err error var pb *packages_model.PackageBlob @@ -237,9 +238,10 @@ func addFileToPackageWrapper(ctx context.Context, fn func(ctx context.Context) ( // See comment in createPackageAndAddFile which explains why RetryTx is used with ErrUniqueConstraintViolation. err := db.RetryTx(ctx, db.RetryConfig{ - // A single retry is sufficient as any package index that was concurrently modified should now be present: - AttemptCount: 2, - ErrorIs: []error{xorm.ErrUniqueConstraintViolation}, + // A single retry is sufficient as any package index that was concurrently modified should now be present; an + // additional retry may be necessary if a deadlock occurs with concurrent updates. + AttemptCount: 3, + ErrorIs: []error{xorm.ErrUniqueConstraintViolation, xorm.ErrDeadlock}, }, func(ctx context.Context) error { var err error var blobCreated bool @@ -468,9 +470,10 @@ func GetOrCreateInternalPackageVersion(ctx context.Context, ownerID int64, packa // See comment in createPackageAndAddFile which explains why RetryTx is used with ErrUniqueConstraintViolation. 
return pv, db.RetryTx(ctx, db.RetryConfig{ - // A single retry is sufficient as any package index that was concurrently modified should now be present: - AttemptCount: 2, - ErrorIs: []error{xorm.ErrUniqueConstraintViolation}, + // A single retry is sufficient as any package index that was concurrently modified should now be present; an + // additional retry may be necessary if a deadlock occurs with concurrent updates. + AttemptCount: 3, + ErrorIs: []error{xorm.ErrUniqueConstraintViolation, xorm.ErrDeadlock}, }, func(ctx context.Context) error { p := &packages_model.Package{ OwnerID: ownerID, From f18873f83be2369b34222bbf1c4a5a6bd85b5d4f Mon Sep 17 00:00:00 2001 From: elbaro Date: Mon, 6 Apr 2026 03:43:41 +0200 Subject: [PATCH 096/352] feat: add /actions/runs/{id}/jobs (#11915) This PR is a minimal implementation to add `/actions/runs/{id}/jobs` (#11859). This endpoint is also required by `/actions/jobs/{id}/logs`. The pagination, filtering, custom sorting, more response fields are left to future work. ## Usage ``` curl -X 'GET' \ 'https://hostname/api/v1/repos/{owner}/{repo}/actions/runs/{id}/jobs' \ -H 'accept: application/json' ``` ## Checklist The [contributor guide](https://forgejo.org/docs/next/contributor/) contains information that will be helpful to first time contributors. All work and communication must conform to Forgejo's [AI Agreement](https://codeberg.org/forgejo/governance/src/branch/main/AIAgreement.md). There also are a few [conditions for merging Pull Requests in Forgejo repositories](https://codeberg.org/forgejo/governance/src/branch/main/PullRequestsAgreement.md). You are also welcome to join the [Forgejo development chatroom](https://matrix.to/#/#forgejo-development:matrix.org). ### Tests for Go changes - I added test coverage for Go changes... - [x] in their respective `*_test.go` for unit tests. - [ ] in the `tests/integration` directory if it involves interactions with a live Forgejo server. - I ran... 
- [x] `make pr-go` before pushing ### Documentation - [ ] I created a pull request [to the documentation](https://codeberg.org/forgejo/docs) to explain to Forgejo users how to use this change. - [x] I did not document these changes and I do not expect someone else to do it. ### Release notes - [x] This change will be noticed by a Forgejo user or admin (feature, bug fix, performance, etc.). I suggest to include a release note for this change. - [ ] This change is not visible to a Forgejo user or admin (refactor, dependency upgrade, etc.). I think there is no need to add a release note for this change. Co-authored-by: elbaro Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/11915 Reviewed-by: Andreas Ahlenstorf Reviewed-by: Mathieu Fenniak Co-authored-by: elbaro Co-committed-by: elbaro --- routers/api/v1/api.go | 1 + routers/api/v1/repo/action.go | 65 +++++++++++++ routers/api/v1/shared/runners.go | 13 +-- routers/api/v1/swagger/repo.go | 7 ++ services/convert/action.go | 19 ++++ templates/swagger/v1_json.tmpl | 59 ++++++++++++ tests/integration/api_repo_actions_test.go | 107 +++++++++++++++++++++ 7 files changed, 259 insertions(+), 12 deletions(-) diff --git a/routers/api/v1/api.go b/routers/api/v1/api.go index 2c10464438..d0b8531aea 100644 --- a/routers/api/v1/api.go +++ b/routers/api/v1/api.go @@ -1238,6 +1238,7 @@ func Routes() *web.Route { m.Group("/runs", func() { m.Get("", repo.ListActionRuns) m.Get("/{run_id}", repo.GetActionRun) + m.Get("/{run_id}/jobs", repo.ListActionRunJobs) }) m.Group("/workflows", func() { diff --git a/routers/api/v1/repo/action.go b/routers/api/v1/repo/action.go index 63baaf4437..ba430d2e64 100644 --- a/routers/api/v1/repo/action.go +++ b/routers/api/v1/repo/action.go @@ -1031,3 +1031,68 @@ func GetActionRun(ctx *context.APIContext) { ctx.JSON(http.StatusOK, convert.ToActionRun(ctx, run, ctx.Doer)) } + +// ListActionRunJobs return a filtered list of jobs that belong to a single workflow run +func ListActionRunJobs(ctx 
*context.APIContext) { + // swagger:operation GET /repos/{owner}/{repo}/actions/runs/{run_id}/jobs repository ListActionRunJobs + // --- + // summary: List jobs of a workflow run + // produces: + // - application/json + // parameters: + // - name: owner + // in: path + // description: owner of the repo + // type: string + // required: true + // - name: repo + // in: path + // description: name of the repo + // type: string + // required: true + // - name: run_id + // in: path + // description: ID of the workflow run + // type: integer + // format: int64 + // required: true + // responses: + // "200": + // "$ref": "#/responses/ActionRunJobList" + // "400": + // "$ref": "#/responses/error" + // "403": + // "$ref": "#/responses/forbidden" + // "404": + // "$ref": "#/responses/notFound" + + run, err := actions_model.GetRunByID(ctx, ctx.ParamsInt64(":run_id")) + if err != nil { + if errors.Is(err, util.ErrNotExist) { + ctx.Error(http.StatusNotFound, "GetRunById", err) + } else { + ctx.Error(http.StatusInternalServerError, "GetRunByID", err) + } + return + } + + // Action runs lives in its own table, therefore we check that the + // run with the requested ID is owned by the repository + if ctx.Repo.Repository.ID != run.RepoID { + ctx.Error(http.StatusNotFound, "GetRunById", util.ErrNotExist) + return + } + + jobs, err := actions_model.GetRunJobsByRunID(ctx, run.ID) + if err != nil { + ctx.Error(http.StatusInternalServerError, "GetRunJobsByRunID", err) + return + } + + response := make([]*api.ActionRunJob, 0, len(jobs)) + for _, job := range jobs { + response = append(response, convert.ToActionRunJob(job)) + } + + ctx.JSON(http.StatusOK, response) +} diff --git a/routers/api/v1/shared/runners.go b/routers/api/v1/shared/runners.go index 21021e4076..6c99630ae1 100644 --- a/routers/api/v1/shared/runners.go +++ b/routers/api/v1/shared/runners.go @@ -74,18 +74,7 @@ func fromRunJobModelToResponse(job []*actions_model.ActionRunJob, labels []strin var res []*structs.ActionRunJob 
for i := range job { if len(labels) == 0 || labels[0] == "" && len(job[i].RunsOn) == 0 || job[i].ItRunsOn(labels) { - res = append(res, &structs.ActionRunJob{ - ID: job[i].ID, - Attempt: job[i].Attempt, - Handle: job[i].Handle, - RepoID: job[i].RepoID, - OwnerID: job[i].OwnerID, - Name: job[i].Name, - Needs: job[i].Needs, - RunsOn: job[i].RunsOn, - TaskID: job[i].TaskID, - Status: job[i].Status.String(), - }) + res = append(res, convert.ToActionRunJob(job[i])) } } return res diff --git a/routers/api/v1/swagger/repo.go b/routers/api/v1/swagger/repo.go index 31da865bb5..5ad16b21ae 100644 --- a/routers/api/v1/swagger/repo.go +++ b/routers/api/v1/swagger/repo.go @@ -526,3 +526,10 @@ type swaggerActionRun struct { // in:body Body api.ActionRun `json:"body"` } + +// ActionRunJobList +// swagger:response ActionRunJobList +type swaggerActionRunJobList struct { + // in:body + Body []api.ActionRunJob `json:"body"` +} diff --git a/services/convert/action.go b/services/convert/action.go index 703c1f1261..204139e3aa 100644 --- a/services/convert/action.go +++ b/services/convert/action.go @@ -47,3 +47,22 @@ func ToActionRun(ctx context.Context, run *actions_model.ActionRun, doer *user_m HTMLURL: run.HTMLURL(), } } + +func ToActionRunJob(job *actions_model.ActionRunJob) *api.ActionRunJob { + if job == nil { + return nil + } + + return &api.ActionRunJob{ + ID: job.ID, + Attempt: job.Attempt, + Handle: job.Handle, + RepoID: job.RepoID, + OwnerID: job.OwnerID, + Name: job.Name, + Needs: job.Needs, + RunsOn: job.RunsOn, + TaskID: job.TaskID, + Status: job.Status.String(), + } +} diff --git a/templates/swagger/v1_json.tmpl b/templates/swagger/v1_json.tmpl index d45f9b2893..e9b10e1e52 100644 --- a/templates/swagger/v1_json.tmpl +++ b/templates/swagger/v1_json.tmpl @@ -5882,6 +5882,56 @@ } } }, + "/repos/{owner}/{repo}/actions/runs/{run_id}/jobs": { + "get": { + "produces": [ + "application/json" + ], + "tags": [ + "repository" + ], + "summary": "List jobs of a workflow run", + 
"operationId": "ListActionRunJobs", + "parameters": [ + { + "type": "string", + "description": "owner of the repo", + "name": "owner", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "name of the repo", + "name": "repo", + "in": "path", + "required": true + }, + { + "type": "integer", + "format": "int64", + "description": "ID of the workflow run", + "name": "run_id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "$ref": "#/responses/ActionRunJobList" + }, + "400": { + "$ref": "#/responses/error" + }, + "403": { + "$ref": "#/responses/forbidden" + }, + "404": { + "$ref": "#/responses/notFound" + } + } + } + }, "/repos/{owner}/{repo}/actions/secrets": { "get": { "produces": [ @@ -30327,6 +30377,15 @@ "$ref": "#/definitions/ActionRun" } }, + "ActionRunJobList": { + "description": "ActionRunJobList", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/ActionRunJob" + } + } + }, "ActionRunList": { "description": "ActionRunList", "schema": { diff --git a/tests/integration/api_repo_actions_test.go b/tests/integration/api_repo_actions_test.go index fa56346a27..ccf333191e 100644 --- a/tests/integration/api_repo_actions_test.go +++ b/tests/integration/api_repo_actions_test.go @@ -4,6 +4,7 @@ package integration import ( + "context" "fmt" "io" "net/http" @@ -668,3 +669,109 @@ func TestAPIRepoActionsRunnerOperations(t *testing.T) { MakeRequest(t, request, http.StatusNotFound) }) } + +func TestActionsAPIListActionRunJobs(t *testing.T) { + defer tests.PrepareTestEnv(t)() + + t.Run("Jobs", func(t *testing.T) { + for _, setup := range []struct { + runID, repoID int64 + }{ + {793, 4}, + {895, 4}, + } { + repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: setup.repoID}) + user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: repo.OwnerID}) + token := getUserToken(t, user.LowerName, auth_model.AccessTokenScopeReadRepository) + req := NewRequest(t, http.MethodGet, + 
fmt.Sprintf("/api/v1/repos/%s/%s/actions/runs/%d/jobs", + repo.OwnerName, repo.Name, setup.runID, + ), + ).AddTokenAuth(token) + res := MakeRequest(t, req, http.StatusOK) + var jobList []*api.ActionRunJob + DecodeJSON(t, res, &jobList) + + correctJobList, err := actions_model.GetRunJobsByRunID(context.Background(), setup.runID) + require.NoError(t, err, "GetRunJobsByRunID") + assert.Len(t, jobList, len(correctJobList)) + + for i := range jobList { + expected := correctJobList[i] + actual := jobList[i] + assert.Equal(t, expected.ID, actual.ID) + assert.Equal(t, expected.Attempt, actual.Attempt) + assert.Equal(t, expected.Handle, actual.Handle) + assert.Equal(t, expected.RepoID, actual.RepoID) + assert.Equal(t, expected.OwnerID, actual.OwnerID) + assert.Equal(t, expected.Name, actual.Name) + assert.Equal(t, expected.Needs, actual.Needs) + assert.Equal(t, expected.RunsOn, actual.RunsOn) + assert.Equal(t, expected.TaskID, actual.TaskID) + assert.Equal(t, expected.Status.String(), actual.Status) + + if expected.ID == 195 { + assert.Equal(t, &api.ActionRunJob{ + ID: 195, + Attempt: 1, + Handle: "", + RepoID: 4, + OwnerID: 1, + Name: "job1 (2)", + Needs: nil, + RunsOn: nil, + TaskID: 50, + Status: "success", + }, actual) + } else if expected.ID == 197 { + assert.Equal(t, &api.ActionRunJob{ + ID: 197, + Attempt: 0, + Handle: "", + RepoID: 4, + OwnerID: 1, + Name: "job1 (1)", + Needs: nil, + RunsOn: []string{"postmarketOS"}, + TaskID: 54, + Status: "failure", + }, actual) + } + } + } + }) + + repoID := int64(4) + runID := int64(793) + + repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: repoID}) + user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: repo.OwnerID}) + token := getUserToken(t, user.LowerName, auth_model.AccessTokenScopeReadRepository) + + t.Run("Wrong Run ID", func(t *testing.T) { + req := NewRequest(t, http.MethodGet, + fmt.Sprintf("/api/v1/repos/%s/%s/actions/runs/%d/jobs", + repo.OwnerName, repo.Name, runID+9999, + ), + 
).AddTokenAuth(token) + MakeRequest(t, req, http.StatusNotFound) + }) + + t.Run("Wrong Repo Name", func(t *testing.T) { + req := NewRequest(t, http.MethodGet, + fmt.Sprintf("/api/v1/repos/%s/%s/actions/runs/%d/jobs", + repo.OwnerName, repo.Name+"_wrong_repo", runID, + ), + ).AddTokenAuth(token) + MakeRequest(t, req, http.StatusNotFound) + }) + + t.Run("Wrong Owner", func(t *testing.T) { + req := NewRequest(t, http.MethodGet, + fmt.Sprintf("/api/v1/repos/%s/%s/actions/runs/%d/jobs", + repo.OwnerName+"_wrong_owner", repo.Name, runID, + ), + ).AddTokenAuth(token) + MakeRequest(t, req, http.StatusNotFound) + }) +} From 92f1b6fdd2851bb522b8b153d77f51eaf67ad309 Mon Sep 17 00:00:00 2001 From: Andreas Ahlenstorf Date: Tue, 7 Apr 2026 05:03:26 +0200 Subject: [PATCH 097/352] test: fix test that was supposed to test DST behaviour but did not (#12007) https://codeberg.org/forgejo/forgejo/pulls/11851 introduced tests that verify the scheduling of Forgejo Actions workflows during daylight saving time (DST) changes. Unfortunately, one test didn't test what it was supposed to because it used a reference time in UTC that was already after the clock change has happened. This change also adds tests that verify that `NewActionScheduleSpec()` respects time zones when calculating the initial execution time of a scheduled workflow. ## Checklist The [contributor guide](https://forgejo.org/docs/next/contributor/) contains information that will be helpful to first time contributors. All work and communication must conform to Forgejo's [AI Agreement](https://codeberg.org/forgejo/governance/src/branch/main/AIAgreement.md). There also are a few [conditions for merging Pull Requests in Forgejo repositories](https://codeberg.org/forgejo/governance/src/branch/main/PullRequestsAgreement.md). You are also welcome to join the [Forgejo development chatroom](https://matrix.to/#/#forgejo-development:matrix.org). 
### Tests for Go changes (can be removed for JavaScript changes) - I added test coverage for Go changes... - [x] in their respective `*_test.go` for unit tests. - [ ] in the `tests/integration` directory if it involves interactions with a live Forgejo server. - I ran... - [x] `make pr-go` before pushing ### Tests for JavaScript changes (can be removed for Go changes) - I added test coverage for JavaScript changes... - [ ] in `web_src/js/*.test.js` if it can be unit tested. - [ ] in `tests/e2e/*.test.e2e.js` if it requires interactions with a live Forgejo server (see also the [developer guide for JavaScript testing](https://codeberg.org/forgejo/forgejo/src/branch/forgejo/tests/e2e/README.md#end-to-end-tests)). ### Documentation - [ ] I created a pull request [to the documentation](https://codeberg.org/forgejo/docs) to explain to Forgejo users how to use this change. - [x] I did not document these changes and I do not expect someone else to do it. ### Release notes - [ ] This change will be noticed by a Forgejo user or admin (feature, bug fix, performance, etc.). I suggest to include a release note for this change. - [x] This change is not visible to a Forgejo user or admin (refactor, dependency upgrade, etc.). I think there is no need to add a release note for this change. *The decision if the pull request will be shown in the release notes is up to the mergers / release team.* The content of the `release-notes/.md` file will serve as the basis for the release notes. If the file does not exist, the title of the pull request will be used instead. 
Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/12007 Reviewed-by: Mathieu Fenniak Co-authored-by: Andreas Ahlenstorf Co-committed-by: Andreas Ahlenstorf --- models/actions/schedule_spec_test.go | 50 ++++++++++++++++++++++++---- 1 file changed, 44 insertions(+), 6 deletions(-) diff --git a/models/actions/schedule_spec_test.go b/models/actions/schedule_spec_test.go index eb3a83d0a6..6b9db9f39a 100644 --- a/models/actions/schedule_spec_test.go +++ b/models/actions/schedule_spec_test.go @@ -13,6 +13,44 @@ import ( "github.com/stretchr/testify/require" ) +func TestActionScheduleSpec_NewActionScheduleSpec(t *testing.T) { + tests := []struct { + name string + refTime time.Time + cronPattern string + timeZone string + want string + wantErr assert.ErrorAssertionFunc + }{ + { + name: "without timezone", + refTime: time.Date(2026, 4, 6, 11, 56, 0, 0, time.UTC), + cronPattern: "58 14 * * *", + want: "2026-04-06T14:58:00Z", + wantErr: assert.NoError, + }, + { + name: "with separate timezone", + refTime: time.Date(2026, 4, 6, 11, 56, 0, 0, time.UTC), + cronPattern: "58 14 * * *", + timeZone: "Europe/Tallinn", // +03 (EEST) + want: "2026-04-06T11:58:00Z", + wantErr: assert.NoError, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + s, err := NewActionScheduleSpec(test.cronPattern, optional.FromNonDefault(test.timeZone), test.refTime) + test.wantErr(t, err) + + if err == nil { + assert.Equal(t, test.want, s.Next.AsTime().UTC().Format(time.RFC3339)) + } + }) + } +} + func TestActionScheduleSpec_Parse(t *testing.T) { // Mock the local timezone is not UTC local := time.Local @@ -88,24 +126,24 @@ func TestActionScheduleSpec_Parse(t *testing.T) { // skipping the execution on that day because any time between 2 and 3 o'clock never happened. Forgejo uses // option B because the code it inherited already did that and was exposed to users. 
name: "skips execution during DST jump forwards", - refTime: time.Date(2025, 3, 30, 1, 5, 0, 0, time.UTC), - spec: "10 2 * * *", // The clock jumps at 2 o'clock to 3 o'clock. + refTime: time.Date(2025, 3, 30, 0, 55, 0, 0, time.UTC), // 01:55 local time + spec: "10 2 * * *", // The clock jumps at 2 o'clock to 3 o'clock. timeZone: "Europe/Berlin", want: "2025-03-31T00:10:00Z", wantErr: assert.NoError, }, { name: "executes a first time before DST jump backwards", - refTime: time.Date(2025, 10, 26, 0, 5, 0, 0, time.UTC), - spec: "10 2 * * *", // The clock jumps at 3 o'clock to 2 o'clock. + refTime: time.Date(2025, 10, 26, 0, 5, 0, 0, time.UTC), // 02:05 local time + spec: "10 2 * * *", // The clock jumps at 3 o'clock to 2 o'clock. timeZone: "Europe/Berlin", want: "2025-10-26T00:10:00Z", wantErr: assert.NoError, }, { name: "executes a second time after DST jump backwards", - refTime: time.Date(2025, 10, 26, 1, 5, 0, 0, time.UTC), - spec: "10 2 * * *", // The clock jumps at 3 o'clock to 2 o'clock. + refTime: time.Date(2025, 10, 26, 1, 5, 0, 0, time.UTC), // 02:05 local time + spec: "10 2 * * *", // The clock jumps at 3 o'clock to 2 o'clock. 
timeZone: "Europe/Berlin", want: "2025-10-26T01:10:00Z", wantErr: assert.NoError, From 0b9e11d96bce9660ce9becdf6ed08d1ae90071e7 Mon Sep 17 00:00:00 2001 From: Renovate Bot Date: Tue, 7 Apr 2026 06:50:24 +0200 Subject: [PATCH 098/352] Update renovate Docker tag to v43.104.4 (forgejo) (#12002) Co-authored-by: Renovate Bot Co-committed-by: Renovate Bot --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 08e8632ac8..665d8b5f38 100644 --- a/Makefile +++ b/Makefile @@ -48,7 +48,7 @@ GOVULNCHECK_PACKAGE ?= golang.org/x/vuln/cmd/govulncheck@v1 # renovate: datasour DEADCODE_PACKAGE ?= golang.org/x/tools/cmd/deadcode@v0.43.0 # renovate: datasource=go ERRORTYPE_PACKAGE ?= fillmore-labs.com/errortype@v0.0.11 # renovate: datasource=go GOMOCK_PACKAGE ?= go.uber.org/mock/mockgen@v0.6.0 # renovate: datasource=go -RENOVATE_NPM_PACKAGE ?= renovate@43.99.1 # renovate: datasource=docker packageName=data.forgejo.org/renovate/renovate +RENOVATE_NPM_PACKAGE ?= renovate@43.104.8 # renovate: datasource=docker packageName=data.forgejo.org/renovate/renovate # https://github.com/disposable-email-domains/disposable-email-domains/commits/main/ DISPOSABLE_EMAILS_SHA ?= 0c27e671231d27cf66370034d7f6818037416989 # renovate: ... From dd968f147d500920b80dbbf3fc0faf9034ca8ddd Mon Sep 17 00:00:00 2001 From: Henry Catalini Smith Date: Wed, 8 Apr 2026 02:20:35 +0200 Subject: [PATCH 099/352] Improve repo file list table semantics for screen readers (#11846) https://codeberg.org/forgejo/forgejo/issues/11116 To understand the impact of this you really need to listen to the before and after screen recordings attached. https://codeberg.org/forgejo/forgejo/issues/11116 is a really great bug report, and I was surprised by how disorienting this actually was when testing manually compared to my expectation after reading the issue. This is an impactful improvement! This is my first time adding new translation strings. 
Excited to learn more about that if I've guessed wrong about how to do it. To summarise, what we're doing here is as follows. 1. Address the core issue by changing the existing `<th>` elements to `<td>` so that screen readers stop semantically associating them with each row and reading them out for every table cell. 2. Replace them with real `<th>` elements that communicate the true semantic role of each column. 3. Add a `<caption>`. This serves a dual purpose: it gives the table an accessible name which improves the navigability of the page, and it gives us a place to explain to the user that the first row of the table is a little bit different because it's the latest commit rather than a file in the repo. 4. Visually hide the new caption and headings so that only screen reader users get them. ## Checklist The [contributor guide](https://forgejo.org/docs/next/contributor/) contains information that will be helpful to first time contributors. All work and communication must conform to Forgejo's [AI Agreement](https://codeberg.org/forgejo/governance/src/branch/main/AIAgreement.md). There also are a few [conditions for merging Pull Requests in Forgejo repositories](https://codeberg.org/forgejo/governance/src/branch/main/PullRequestsAgreement.md). You are also welcome to join the [Forgejo development chatroom](https://matrix.to/#/#forgejo-development:matrix.org). ### Tests for JavaScript changes - I added test coverage for JavaScript changes... - [ ] in `web_src/js/*.test.js` if it can be unit tested. - [ ] in `tests/e2e/*.test.e2e.js` if it requires interactions with a live Forgejo server (see also the [developer guide for JavaScript testing](https://codeberg.org/forgejo/forgejo/src/branch/forgejo/tests/e2e/README.md#end-to-end-tests)). ### Documentation - [ ] I created a pull request [to the documentation](https://codeberg.org/forgejo/docs) to explain to Forgejo users how to use this change. - [ ] I did not document these changes and I do not expect someone else to do it.
### Release notes - [x] This change will be noticed by a Forgejo user or admin (feature, bug fix, performance, etc.). I suggest to include a release note for this change. - [ ] This change is not visible to a Forgejo user or admin (refactor, dependency upgrade, etc.). I think there is no need to add a release note for this change. *The decision if the pull request will be shown in the release notes is up to the mergers / release team.* The content of the `release-notes/.md` file will serve as the basis for the release notes. If the file does not exist, the title of the pull request will be used instead. Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/11846 Reviewed-by: Gusted Reviewed-by: Andreas Ahlenstorf Co-authored-by: Henry Catalini Smith Co-committed-by: Henry Catalini Smith --- options/locale_next/locale_en-US.json | 4 ++++ templates/repo/view_list.tmpl | 18 ++++++++++++------ tests/integration/repo_test.go | 4 ++-- web_src/css/repo.css | 2 +- 4 files changed, 19 insertions(+), 9 deletions(-) diff --git a/options/locale_next/locale_en-US.json b/options/locale_next/locale_en-US.json index c0ed083b66..3ffcb45bce 100644 --- a/options/locale_next/locale_en-US.json +++ b/options/locale_next/locale_en-US.json @@ -52,6 +52,10 @@ "relativetime.1week": "last week", "relativetime.1month": "last month", "relativetime.1year": "last year", + "repo.files.caption": "Repository files (latest commit first)", + "repo.files.filename": "Filename", + "repo.files.last_commit_message": "Latest commit message", + "repo.files.last_commit_date": "Latest commit date", "repo.issues.filter_poster.hint": "Filter by the author", "repo.issues.filter_assignee.hint": "Filter by assigned user", "repo.issues.filter_reviewers.hint": "Filter by user who reviewed", diff --git a/templates/repo/view_list.tmpl b/templates/repo/view_list.tmpl index ffd8b1a515..75f53c1b5f 100644 --- a/templates/repo/view_list.tmpl +++ b/templates/repo/view_list.tmpl @@ -1,17 +1,23 @@ - + + + + + + + - + + - - 
{{if .HasParentPath}} diff --git a/tests/integration/repo_test.go b/tests/integration/repo_test.go index 1b4a574f13..2f189bf7a8 100644 --- a/tests/integration/repo_test.go +++ b/tests/integration/repo_test.go @@ -70,7 +70,7 @@ func testViewRepo(t *testing.T) { resp := session.MakeRequest(t, req, http.StatusOK) htmlDoc := NewHTMLParser(t, resp.Body) - files := htmlDoc.doc.Find("#repo-files-table > TBODY > TR") + files := htmlDoc.doc.Find("#repo-files-table > tbody > tr:not(.commit-list)") type file struct { fileName string @@ -1039,7 +1039,7 @@ func TestRepoFilesList(t *testing.T) { resp := MakeRequest(t, req, http.StatusOK) htmlDoc := NewHTMLParser(t, resp.Body) - filesList := htmlDoc.Find("#repo-files-table tbody tr").Map(func(_ int, s *goquery.Selection) string { + filesList := htmlDoc.Find("#repo-files-table tbody tr:not(.commit-list)").Map(func(_ int, s *goquery.Selection) string { return s.AttrOr("data-entryname", "") }) diff --git a/web_src/css/repo.css b/web_src/css/repo.css index 00f159f33b..66b1d5df71 100644 --- a/web_src/css/repo.css +++ b/web_src/css/repo.css @@ -237,7 +237,7 @@ td .commit-summary { max-width: calc(calc(min(100vw, 1280px)) - 145px - calc(2 * var(--page-margin-x))); } -.repository.file.list #repo-files-table thead th { +.repo-files-table-latest-commit-row td { font-weight: var(--font-weight-normal); } From 8a6d76cff4800b459f0d8bd03f49d24638598214 Mon Sep 17 00:00:00 2001 From: Henry Catalini Smith Date: Wed, 8 Apr 2026 02:32:14 +0200 Subject: [PATCH 100/352] Preserve focus on star/unstar & watch/unwatch buttons after click (#11932) Fixes https://codeberg.org/forgejo/forgejo/issues/11880. Adding `hx-on::after-settle="this.querySelector('button').focus()"` restores focus after the content has been swapped and the DOM has been setled. I tried `hx-on::after-swap` first since it's mentioned more often in https://github.com/bigskysoftware/htmx/issues/1869, but it didn't work. 
The demo attached in `focus.mp4` runs through a series of repeated clicks on both buttons. You can hear the screen reader announce the button's new label when focus is restored. ### Documentation - [ ] I created a pull request [to the documentation](https://codeberg.org/forgejo/docs) to explain to Forgejo users how to use this change. - [ ] I did not document these changes and I do not expect someone else to do it. ### Release notes - [x] This change will be noticed by a Forgejo user or admin (feature, bug fix, performance, etc.). I suggest to include a release note for this change. - [ ] This change is not visible to a Forgejo user or admin (refactor, dependency upgrade, etc.). I think there is no need to add a release note for this change. *The decision if the pull request will be shown in the release notes is up to the mergers / release team.* The content of the `release-notes/.md` file will serve as the basis for the release notes. If the file does not exist, the title of the pull request will be used instead. Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/11932 Reviewed-by: Andreas Ahlenstorf Co-authored-by: Henry Catalini Smith Co-committed-by: Henry Catalini Smith --- templates/repo/star_unstar.tmpl | 2 +- templates/repo/watch_unwatch.tmpl | 2 +- tests/e2e/repo-home.test.e2e.ts | 14 ++++++++++++++ 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/templates/repo/star_unstar.tmpl b/templates/repo/star_unstar.tmpl index d483495dd4..f3c1afc53c 100644 --- a/templates/repo/star_unstar.tmpl +++ b/templates/repo/star_unstar.tmpl @@ -1,4 +1,4 @@ - +
+ {{ctx.Locale.Tr "repo.files.caption"}} +
{{ctx.Locale.Tr "repo.files.filename"}}{{ctx.Locale.Tr "repo.files.last_commit_message"}}{{ctx.Locale.Tr "repo.files.last_commit_date"}}
+
{{template "repo/latest_commit" .}}
- -
{{if .LatestCommit}}{{if .LatestCommit.Committer}}{{DateUtils.TimeSince .LatestCommit.Committer.When}}{{end}}{{end}}{{if .LatestCommit}}{{if .LatestCommit.Committer}}{{DateUtils.TimeSince .LatestCommit.Committer.When}}{{end}}{{end}}
{{svg "octicon-reply" 16 "tw-mr-2"}}..