chore: add modernizer linter (#11936)
- Go has a suite of small linters that help with modernizing Go code by using newer functions and catching small mistakes: https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize.
- Enable this linter in golangci-lint.
- There's also [`go fix`](https://go.dev/blog/gofix), which is not yet released as a linter in golangci-lint: https://github.com/golangci/golangci-lint/pull/6385

Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/11936
Reviewed-by: Mathieu Fenniak <mfenniak@noreply.codeberg.org>
Co-authored-by: Gusted <postmaster@gusted.xyz>
Co-committed-by: Gusted <postmaster@gusted.xyz>
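The analyzer's suggestions are mechanical rewrites of long-standing idioms. A minimal, hypothetical sketch (not from this commit) of the kind of change it reports:

```go
package main

import (
	"fmt"
	"slices"
)

// hasAdmin is illustrative only: modernize flags the classic element-search
// loop and suggests slices.Contains (Go 1.21+) instead.
func hasAdmin(roles []string) bool {
	// Before: for _, r := range roles { if r == "admin" { return true } }; return false
	return slices.Contains(roles, "admin")
}

func main() {
	fmt.Println(hasAdmin([]string{"user", "admin"})) // true
}
```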
parent d728fddec5
commit 77dbc35138

249 changed files with 659 additions and 1010 deletions
@@ -15,6 +15,7 @@ linters:
   - govet
   - importas
   - ineffassign
+  - modernize
   - nakedret
   - nolintlint
   - revive
@@ -150,8 +150,8 @@ func runCert(ctx context.Context, c *cli.Command) error {
 		BasicConstraintsValid: true,
 	}
 
-	hosts := strings.Split(c.String("host"), ",")
-	for _, h := range hosts {
+	hosts := strings.SplitSeq(c.String("host"), ",")
+	for h := range hosts {
 		if ip := net.ParseIP(h); ip != nil {
 			template.IPAddresses = append(template.IPAddresses, ip)
 		} else {
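Background on the rewrite above: strings.SplitSeq (Go 1.24) returns an iterator rather than an allocated slice, which is why the loop changes from `for _, h := range hosts` to `for h := range hosts`. A minimal sketch with illustrative input:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// SplitSeq yields substrings lazily; no intermediate []string is built.
	for h := range strings.SplitSeq("127.0.0.1,localhost,::1", ",") {
		fmt.Println(h)
	}
}
```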
cmd/dump.go (37 changed lines)
@@ -12,6 +12,7 @@ import (
 	"os"
 	"path"
 	"path/filepath"
+	"slices"
 	"strings"
 	"sync"
 	"time"
@@ -83,11 +84,9 @@ func (o outputType) Join() string {
 }
 
 func (o *outputType) Set(value string) error {
-	for _, enum := range o.Enum {
-		if enum == value {
-			o.selected = value
-			return nil
-		}
+	if slices.Contains(o.Enum, value) {
+		o.selected = value
+		return nil
 	}
 
 	return fmt.Errorf("allowed values are %s", o.Join())
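Background: slices.Contains and slices.ContainsFunc (Go 1.21) replace the hand-written search loops removed throughout this commit. A small sketch with illustrative values:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	enum := []string{"tar", "tar.gz", "zip"}

	// Equality search: replaces "for ... if v == x { return true }" loops.
	fmt.Println(slices.Contains(enum, "zip")) // true

	// Predicate search: replaces loops that call a match function per element.
	fmt.Println(slices.ContainsFunc(enum, func(s string) bool {
		return len(s) > 3
	})) // true ("tar.gz")
}
```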
@@ -250,8 +249,8 @@ func runDump(stdCtx context.Context, ctx *cli.Command) error {
 		setupConsoleLogger(log.FATAL, log.CanColorStderr, os.Stderr)
 	} else {
 		for _, suffix := range outputTypeEnum.Enum {
-			if strings.HasSuffix(fileName, "."+suffix) {
-				fileName = strings.TrimSuffix(fileName, "."+suffix)
+			if before, ok := strings.CutSuffix(fileName, "."+suffix); ok {
+				fileName = before
 				break
 			}
 		}
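Background: strings.Cut (Go 1.18) and strings.CutPrefix/strings.CutSuffix (Go 1.20) fold the HasSuffix+TrimSuffix pattern into one call that also reports whether the cut happened. A small sketch with illustrative strings:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// CutSuffix: one call instead of HasSuffix followed by TrimSuffix.
	if before, ok := strings.CutSuffix("forgejo-dump.tar.gz", ".tar.gz"); ok {
		fmt.Println(before) // forgejo-dump
	}

	// Cut: split on the first separator, keeping both halves.
	if key, value, ok := strings.Cut("Content-Type: text/plain", ": "); ok {
		fmt.Println(key, "=", value)
	}
}
```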
@@ -330,14 +329,12 @@ func runDump(stdCtx context.Context, ctx *cli.Command) error {
 		go dumpDatabase(ctx, archiveJobs, &wg, verbose)
 
 		if len(setting.CustomConf) > 0 {
-			wg.Add(1)
-			go func() {
-				defer wg.Done()
+			wg.Go(func() {
 				log.Info("Adding custom configuration file from %s", setting.CustomConf)
 				if err := addFile(archiveJobs, "app.ini", setting.CustomConf, verbose); err != nil {
 					fatal("Failed to include specified app.ini: %v", err)
 				}
-			}()
+			})
 		}
 
 		if ctx.IsSet("skip-custom-dir") && ctx.Bool("skip-custom-dir") {
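Background: sync.WaitGroup.Go (Go 1.25) wraps the Add(1) / go func() { defer Done() }() boilerplate removed in this and the following hunks. A minimal sketch:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	// wg.Go calls Add(1), runs the function in a new goroutine,
	// and calls Done when it returns.
	for i := range 3 {
		wg.Go(func() {
			fmt.Println("job", i)
		})
	}
	wg.Wait()
}
```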
@@ -361,15 +358,13 @@ func runDump(stdCtx context.Context, ctx *cli.Command) error {
 	if ctx.IsSet("skip-attachment-data") && ctx.Bool("skip-attachment-data") {
 		log.Info("Skipping attachment data")
 	} else {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
+		wg.Go(func() {
 			if err := storage.Attachments.IterateObjects("", func(objPath string, object storage.Object) error {
 				return addObject(archiveJobs, object, path.Join("data", "attachments", objPath), verbose)
 			}); err != nil {
 				fatal("Failed to dump attachments: %v", err)
 			}
-		}()
+		})
 	}
 
 	if ctx.IsSet("skip-package-data") && ctx.Bool("skip-package-data") {
@@ -377,15 +372,13 @@ func runDump(stdCtx context.Context, ctx *cli.Command) error {
 	} else if !setting.Packages.Enabled {
 		log.Info("Package registry not enabled - skipping")
 	} else {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
+		wg.Go(func() {
 			if err := storage.Packages.IterateObjects("", func(objPath string, object storage.Object) error {
 				return addObject(archiveJobs, object, path.Join("data", "packages", objPath), verbose)
 			}); err != nil {
 				fatal("Failed to dump packages: %v", err)
 			}
-		}()
+		})
 	}
 
 	// Doesn't check if LogRootPath exists before processing --skip-log intentionally,
@@ -399,13 +392,11 @@ func runDump(stdCtx context.Context, ctx *cli.Command) error {
 			log.Error("Failed to check if %s exists: %v", setting.Log.RootPath, err)
 		}
 		if isExist {
-			wg.Add(1)
-			go func() {
-				defer wg.Done()
+			wg.Go(func() {
 				if err := addRecursiveExclude(archiveJobs, "log", setting.Log.RootPath, []string{absFileName}, verbose); err != nil {
 					fatal("Failed to include log: %v", err)
 				}
-			}()
+			})
 		}
 	}
 
@@ -143,8 +143,8 @@ func runDumpRepository(stdCtx context.Context, ctx *cli.Command) error {
 		opts.PullRequests = true
 		opts.ReleaseAssets = true
 	} else {
-		units := strings.Split(ctx.String("units"), ",")
-		for _, unit := range units {
+		units := strings.SplitSeq(ctx.String("units"), ",")
+		for unit := range units {
 			switch strings.ToLower(strings.TrimSpace(unit)) {
 			case "":
 				continue
@@ -4,6 +4,8 @@
 package actions
 
 import (
+	"slices"
+
 	"forgejo.org/modules/translation"
 
 	runnerv1 "code.forgejo.org/forgejo/actions-proto/runner/v1"
@@ -107,12 +109,7 @@ func (s Status) IsBlocked() bool {
 
 // In returns whether s is one of the given statuses
 func (s Status) In(statuses ...Status) bool {
-	for _, v := range statuses {
-		if s == v {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(statuses, s)
 }
 
 func (s Status) AsResult() runnerv1.Result {
@@ -132,12 +132,7 @@ func (at ActionType) String() string {
 }
 
 func (at ActionType) InActions(actions ...string) bool {
-	for _, action := range actions {
-		if action == at.String() {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(actions, at.String())
 }
 
 // Action represents user operation type and other information to
@@ -213,10 +213,7 @@ func (nl NotificationList) LoadRepos(ctx context.Context) (repo_model.Repository
 	repos := make(map[int64]*repo_model.Repository, len(repoIDs))
 	left := len(repoIDs)
 	for left > 0 {
-		limit := db.DefaultMaxInSize
-		if left < limit {
-			limit = left
-		}
+		limit := min(left, db.DefaultMaxInSize)
 		rows, err := db.GetEngine(ctx).
 			In("id", repoIDs[:limit]).
 			Rows(new(repo_model.Repository))
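Background: min and max became builtins in Go 1.21, which is what collapses each three-line clamp in the chunked-query hunks into a single line. A sketch of the chunking idiom with an illustrative constant:

```go
package main

import "fmt"

const defaultMaxInSize = 1000 // illustrative stand-in for db.DefaultMaxInSize

func main() {
	ids := make([]int64, 2500)
	left := len(ids)
	for left > 0 {
		// One call replaces: limit := defaultMaxInSize; if left < limit { limit = left }
		limit := min(left, defaultMaxInSize)
		fmt.Println("querying", limit, "ids")
		left -= limit
		ids = ids[limit:]
	}
}
```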
@@ -287,10 +284,7 @@ func (nl NotificationList) LoadIssues(ctx context.Context) ([]int, error) {
 	issues := make(map[int64]*issues_model.Issue, len(issueIDs))
 	left := len(issueIDs)
 	for left > 0 {
-		limit := db.DefaultMaxInSize
-		if left < limit {
-			limit = left
-		}
+		limit := min(left, db.DefaultMaxInSize)
 		rows, err := db.GetEngine(ctx).
 			In("id", issueIDs[:limit]).
 			Rows(new(issues_model.Issue))

@@ -382,10 +376,7 @@ func (nl NotificationList) LoadUsers(ctx context.Context) ([]int, error) {
 	users := make(map[int64]*user_model.User, len(userIDs))
 	left := len(userIDs)
 	for left > 0 {
-		limit := db.DefaultMaxInSize
-		if left < limit {
-			limit = left
-		}
+		limit := min(left, db.DefaultMaxInSize)
 		rows, err := db.GetEngine(ctx).
 			In("id", userIDs[:limit]).
 			Rows(new(user_model.User))

@@ -433,10 +424,7 @@ func (nl NotificationList) LoadComments(ctx context.Context) ([]int, error) {
 	comments := make(map[int64]*issues_model.Comment, len(commentIDs))
 	left := len(commentIDs)
 	for left > 0 {
-		limit := db.DefaultMaxInSize
-		if left < limit {
-			limit = left
-		}
+		limit := min(left, db.DefaultMaxInSize)
 		rows, err := db.GetEngine(ctx).
 			In("id", commentIDs[:limit]).
 			Rows(new(issues_model.Comment))
@@ -138,10 +138,7 @@ func GetActivityStatsTopAuthors(ctx context.Context, repo *repo_model.Repository
 		return v[i].Commits > v[j].Commits
 	})
 
-	cnt := count
-	if cnt > len(v) {
-		cnt = len(v)
-	}
+	cnt := min(count, len(v))
 
 	return v[:cnt], nil
 }
@@ -5,6 +5,7 @@ package auth
 
 import (
 	"fmt"
+	"slices"
 	"strings"
 
 	"forgejo.org/models/perm"
@@ -204,12 +205,7 @@ func GetRequiredScopes(level AccessTokenScopeLevel, scopeCategories ...AccessTok
 
 // ContainsCategory checks if a list of categories contains a specific category
 func ContainsCategory(categories []AccessTokenScopeCategory, category AccessTokenScopeCategory) bool {
-	for _, c := range categories {
-		if c == category {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(categories, category)
 }
 
 // GetScopeLevelFromAccessMode converts permission access mode to scope level
@@ -505,7 +505,7 @@ func (grant *OAuth2Grant) IncreaseCounter(ctx context.Context) error {
 
 // ScopeContains returns true if the grant scope contains the specified scope
 func (grant *OAuth2Grant) ScopeContains(scope string) bool {
-	for _, currentScope := range strings.Split(grant.Scope, " ") {
+	for currentScope := range strings.SplitSeq(grant.Scope, " ") {
 		if scope == currentScope {
 			return true
 		}
@@ -91,7 +91,7 @@ var registeredConfigs = map[Type]func() Config{}
 
 // RegisterTypeConfig register a config for a provided type
 func RegisterTypeConfig(typ Type, exemplar Config) {
-	if reflect.TypeOf(exemplar).Kind() == reflect.Ptr {
+	if reflect.TypeOf(exemplar).Kind() == reflect.Pointer {
 		// Pointer:
 		registeredConfigs[typ] = func() Config {
 			return reflect.New(reflect.ValueOf(exemplar).Elem().Type()).Interface().(Config)
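Background: reflect.Pointer (Go 1.18) is the newer name for the same Kind; reflect.Ptr remains as an alias, and modernize prefers the new spelling. A minimal sketch:

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	v := reflect.ValueOf(&struct{ Name string }{Name: "x"})
	// reflect.Pointer == reflect.Ptr; only the name is newer.
	if v.Kind() == reflect.Pointer {
		v = v.Elem()
	}
	fmt.Println(v.FieldByName("Name")) // x
}
```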
@@ -80,7 +80,7 @@ func Iterate[Bean any](ctx context.Context, cond builder.Cond, f func(ctx contex
 
 func extractFieldValue(bean any, fieldName string) any {
 	v := reflect.ValueOf(bean)
-	if v.Kind() == reflect.Ptr {
+	if v.Kind() == reflect.Pointer {
 		v = v.Elem()
 	}
 	field := v.FieldByName(fieldName)
@@ -6,6 +6,7 @@ package db
 import (
 	"fmt"
 	"regexp"
+	"slices"
 	"strings"
 	"unicode/utf8"
 
@@ -114,10 +115,8 @@ func IsUsableName(names, patterns []string, name string) error {
 		return ErrNameEmpty
 	}
 
-	for i := range names {
-		if name == names[i] {
-			return ErrNameReserved{name}
-		}
+	if slices.Contains(names, name) {
+		return ErrNameReserved{name}
 	}
 
 	for _, pat := range patterns {
@@ -46,10 +46,7 @@ func (f *file) readAt(fileMeta *DbfsMeta, offset int64, p []byte) (n int, err er
 	blobPos := int(offset % f.blockSize)
 	blobOffset := offset - int64(blobPos)
 	blobRemaining := int(f.blockSize) - blobPos
-	needRead := len(p)
-	if needRead > blobRemaining {
-		needRead = blobRemaining
-	}
+	needRead := min(len(p), blobRemaining)
 	if blobOffset+int64(blobPos)+int64(needRead) > fileMeta.FileSize {
 		needRead = int(fileMeta.FileSize - blobOffset - int64(blobPos))
 	}
@@ -66,14 +63,8 @@ func (f *file) readAt(fileMeta *DbfsMeta, offset int64, p []byte) (n int, err er
 		blobData = nil
 	}
 
-	canCopy := len(blobData) - blobPos
-	if canCopy <= 0 {
-		canCopy = 0
-	}
-	realRead := needRead
-	if realRead > canCopy {
-		realRead = canCopy
-	}
+	canCopy := max(len(blobData)-blobPos, 0)
+	realRead := min(needRead, canCopy)
 	if realRead > 0 {
 		copy(p[:realRead], fileData.BlobData[blobPos:blobPos+realRead])
 	}
@@ -113,10 +104,7 @@ func (f *file) Write(p []byte) (n int, err error) {
 	blobPos := int(f.offset % f.blockSize)
 	blobOffset := f.offset - int64(blobPos)
 	blobRemaining := int(f.blockSize) - blobPos
-	needWrite := len(p)
-	if needWrite > blobRemaining {
-		needWrite = blobRemaining
-	}
+	needWrite := min(len(p), blobRemaining)
 	buf := make([]byte, f.blockSize)
 	readBytes, err := f.readAt(fileMeta, blobOffset, buf)
 	if err != nil && !errors.Is(err, io.EOF) {
@@ -213,7 +213,7 @@ func (protectBranch *ProtectedBranch) GetUnprotectedFilePatterns() []glob.Glob {
 
 func getFilePatterns(filePatterns string) []glob.Glob {
 	extarr := make([]glob.Glob, 0, 10)
-	for _, expr := range strings.Split(strings.ToLower(filePatterns), ";") {
+	for expr := range strings.SplitSeq(strings.ToLower(filePatterns), ";") {
 		expr = strings.TrimSpace(expr)
 		if expr != "" {
 			if g, err := glob.Compile(expr, '.', '/'); err != nil {
@@ -265,7 +265,7 @@ func deleteDB() error {
 
 func removeAllWithRetry(dir string) error {
 	var err error
-	for i := 0; i < 20; i++ {
+	for range 20 {
 		err = os.RemoveAll(dir)
 		if err == nil {
 			break
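Background: ranging over an integer (Go 1.22) replaces plain counter loops. A minimal sketch:

```go
package main

import "fmt"

func main() {
	// Equivalent to: for i := 0; i < 3; i++ { ... }
	for i := range 3 {
		fmt.Println("attempt", i)
	}

	// When the index is unused, the variable can be dropped entirely.
	for range 3 {
		fmt.Println("retry")
	}
}
```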
@@ -5,6 +5,7 @@ package v1_11
 
 import (
 	"fmt"
+	"slices"
 
 	"xorm.io/xorm"
 )
@@ -345,10 +346,8 @@ func AddBranchProtectionCanPushAndEnableWhitelist(x *xorm.Engine) error {
 		}
 		return AccessModeWrite <= perm.UnitsMode[UnitTypeCode], nil
 	}
-	for _, id := range protectedBranch.ApprovalsWhitelistUserIDs {
-		if id == reviewer.ID {
-			return true, nil
-		}
+	if slices.Contains(protectedBranch.ApprovalsWhitelistUserIDs, reviewer.ID) {
+		return true, nil
 	}
 
 	// isUserInTeams
@@ -146,7 +146,7 @@ func copyOldAvatarToNewLocation(userID int64, oldAvatar string) (string, error)
 		return "", fmt.Errorf("io.ReadAll: %w", err)
 	}
 
-	newAvatar := fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%d-%x", userID, md5.Sum(data)))))
+	newAvatar := fmt.Sprintf("%x", md5.Sum(fmt.Appendf(nil, "%d-%x", userID, md5.Sum(data))))
 	if newAvatar == oldAvatar {
 		return newAvatar, nil
 	}
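Background: fmt.Appendf (Go 1.19) formats straight into a byte slice, so []byte(fmt.Sprintf(...)) becomes fmt.Appendf(nil, ...) without the intermediate string. A sketch with illustrative values:

```go
package main

import (
	"crypto/md5"
	"fmt"
)

func main() {
	userID, data := int64(42), []byte("avatar-bytes") // illustrative values
	// Before: md5.Sum([]byte(fmt.Sprintf("%d-%x", userID, md5.Sum(data))))
	sum := md5.Sum(fmt.Appendf(nil, "%d-%x", userID, md5.Sum(data)))
	fmt.Printf("%x\n", sum)
}
```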
@@ -329,7 +329,7 @@ func ConvertScopedAccessTokens(x *xorm.Engine) error {
 	for _, token := range tokens {
 		var scopes []string
 		allNewScopesMap := make(map[AccessTokenScope]bool)
-		for _, oldScope := range strings.Split(token.Scope, ",") {
+		for oldScope := range strings.SplitSeq(token.Scope, ",") {
 			if newScopes, exists := accessTokenScopeMap[OldAccessTokenScope(oldScope)]; exists {
 				for _, newScope := range newScopes {
 					allNewScopesMap[newScope] = true
@@ -9,6 +9,7 @@ import (
 	"context"
 	"fmt"
 	"html/template"
+	"slices"
 	"strconv"
 	"unicode/utf8"
 
@@ -198,12 +199,7 @@ func (t CommentType) HasMailReplySupport() bool {
 }
 
 func (t CommentType) CountedAsConversation() bool {
-	for _, ct := range ConversationCountedCommentType() {
-		if t == ct {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(ConversationCountedCommentType(), t)
 }
 
 // ConversationCountedCommentType returns the comment types that are counted as a conversation
@@ -619,7 +615,7 @@ func (c *Comment) UpdateAttachments(ctx context.Context, uuids []string) error {
 	if err != nil {
 		return fmt.Errorf("FindRepoAttachmentsByUUID[uuids=%q,repoID=%d]: %w", uuids, c.Issue.RepoID, err)
 	}
-	for i := 0; i < len(attachments); i++ {
+	for i := range attachments {
 		attachments[i].IssueID = c.IssueID
 		attachments[i].CommentID = c.ID
 		if err := repo_model.UpdateAttachment(ctx, attachments[i]); err != nil {
@@ -54,10 +54,7 @@ func (comments CommentList) loadLabels(ctx context.Context) error {
 	commentLabels := make(map[int64]*Label, len(labelIDs))
 	left := len(labelIDs)
 	for left > 0 {
-		limit := db.DefaultMaxInSize
-		if left < limit {
-			limit = left
-		}
+		limit := min(left, db.DefaultMaxInSize)
 		rows, err := db.GetEngine(ctx).
 			In("id", labelIDs[:limit]).
 			Rows(new(Label))

@@ -104,10 +101,7 @@ func (comments CommentList) loadMilestones(ctx context.Context) error {
 	milestones := make(map[int64]*Milestone, len(milestoneIDs))
 	left := len(milestoneIDs)
 	for left > 0 {
-		limit := db.DefaultMaxInSize
-		if left < limit {
-			limit = left
-		}
+		limit := min(left, db.DefaultMaxInSize)
 		err := db.GetEngine(ctx).
 			In("id", milestoneIDs[:limit]).
 			Find(&milestones)

@@ -143,10 +137,7 @@ func (comments CommentList) loadOldMilestones(ctx context.Context) error {
 	milestones := make(map[int64]*Milestone, len(milestoneIDs))
 	left := len(milestoneIDs)
 	for left > 0 {
-		limit := db.DefaultMaxInSize
-		if left < limit {
-			limit = left
-		}
+		limit := min(left, db.DefaultMaxInSize)
 		err := db.GetEngine(ctx).
 			In("id", milestoneIDs[:limit]).
 			Find(&milestones)

@@ -178,10 +169,7 @@ func (comments CommentList) loadAssignees(ctx context.Context) error {
 	assignees := make(map[int64]*user_model.User, len(assigneeIDs))
 	left := len(assigneeIDs)
 	for left > 0 {
-		limit := db.DefaultMaxInSize
-		if left < limit {
-			limit = left
-		}
+		limit := min(left, db.DefaultMaxInSize)
 		rows, err := db.GetEngine(ctx).
 			In("id", assigneeIDs[:limit]).
 			Rows(new(user_model.User))

@@ -246,10 +234,7 @@ func (comments CommentList) LoadIssues(ctx context.Context) error {
 	issues := make(map[int64]*Issue, len(issueIDs))
 	left := len(issueIDs)
 	for left > 0 {
-		limit := db.DefaultMaxInSize
-		if left < limit {
-			limit = left
-		}
+		limit := min(left, db.DefaultMaxInSize)
 		rows, err := db.GetEngine(ctx).
 			In("id", issueIDs[:limit]).
 			Rows(new(Issue))

@@ -300,10 +285,7 @@ func (comments CommentList) loadDependentIssues(ctx context.Context) error {
 	issues := make(map[int64]*Issue, len(issueIDs))
 	left := len(issueIDs)
 	for left > 0 {
-		limit := db.DefaultMaxInSize
-		if left < limit {
-			limit = left
-		}
+		limit := min(left, db.DefaultMaxInSize)
 		rows, err := e.
 			In("id", issueIDs[:limit]).
 			Rows(new(Issue))

@@ -379,10 +361,7 @@ func (comments CommentList) LoadAttachments(ctx context.Context) (err error) {
 	commentsIDs := comments.getAttachmentCommentIDs()
 	left := len(commentsIDs)
 	for left > 0 {
-		limit := db.DefaultMaxInSize
-		if left < limit {
-			limit = left
-		}
+		limit := min(left, db.DefaultMaxInSize)
 		rows, err := db.GetEngine(ctx).
 			In("comment_id", commentsIDs[:limit]).
 			Rows(new(repo_model.Attachment))
@@ -43,10 +43,7 @@ func (issues IssueList) LoadRepositories(ctx context.Context) (repo_model.Reposi
 	repoMaps := make(map[int64]*repo_model.Repository, len(repoIDs))
 	left := len(repoIDs)
 	for left > 0 {
-		limit := db.DefaultMaxInSize
-		if left < limit {
-			limit = left
-		}
+		limit := min(left, db.DefaultMaxInSize)
 		err := db.GetEngine(ctx).
 			In("id", repoIDs[:limit]).
 			Find(&repoMaps)

@@ -99,10 +96,7 @@ func getPostersByIDs(ctx context.Context, posterIDs []int64) (map[int64]*user_mo
 	posterMaps := make(map[int64]*user_model.User, len(posterIDs))
 	left := len(posterIDs)
 	for left > 0 {
-		limit := db.DefaultMaxInSize
-		if left < limit {
-			limit = left
-		}
+		limit := min(left, db.DefaultMaxInSize)
 		err := db.GetEngine(ctx).
 			In("id", posterIDs[:limit]).
 			Find(&posterMaps)

@@ -137,10 +131,7 @@ func (issues IssueList) LoadLabels(ctx context.Context) error {
 	issueIDs := issues.getIssueIDs()
 	left := len(issueIDs)
 	for left > 0 {
-		limit := db.DefaultMaxInSize
-		if left < limit {
-			limit = left
-		}
+		limit := min(left, db.DefaultMaxInSize)
 		rows, err := db.GetEngine(ctx).Table("label").
 			Join("LEFT", "issue_label", "issue_label.label_id = label.id").
 			In("issue_label.issue_id", issueIDs[:limit]).

@@ -192,10 +183,7 @@ func (issues IssueList) LoadMilestones(ctx context.Context) error {
 	milestoneMaps := make(map[int64]*Milestone, len(milestoneIDs))
 	left := len(milestoneIDs)
 	for left > 0 {
-		limit := db.DefaultMaxInSize
-		if left < limit {
-			limit = left
-		}
+		limit := min(left, db.DefaultMaxInSize)
 		err := db.GetEngine(ctx).
 			In("id", milestoneIDs[:limit]).
 			Find(&milestoneMaps)

@@ -224,10 +212,7 @@ func (issues IssueList) LoadProjects(ctx context.Context) error {
 	}
 
 	for left > 0 {
-		limit := db.DefaultMaxInSize
-		if left < limit {
-			limit = left
-		}
+		limit := min(left, db.DefaultMaxInSize)
 
 		projects := make([]*projectWithIssueID, 0, limit)
 		err := db.GetEngine(ctx).

@@ -266,10 +251,7 @@ func (issues IssueList) LoadAssignees(ctx context.Context) error {
 	issueIDs := issues.getIssueIDs()
 	left := len(issueIDs)
 	for left > 0 {
-		limit := db.DefaultMaxInSize
-		if left < limit {
-			limit = left
-		}
+		limit := min(left, db.DefaultMaxInSize)
 		rows, err := db.GetEngine(ctx).Table("issue_assignees").
 			Join("INNER", "`user`", "`user`.id = `issue_assignees`.assignee_id").
 			In("`issue_assignees`.issue_id", issueIDs[:limit]).OrderBy(user_model.GetOrderByName()).

@@ -327,10 +309,7 @@ func (issues IssueList) LoadPullRequests(ctx context.Context) error {
 	pullRequestMaps := make(map[int64]*PullRequest, len(issuesIDs))
 	left := len(issuesIDs)
 	for left > 0 {
-		limit := db.DefaultMaxInSize
-		if left < limit {
-			limit = left
-		}
+		limit := min(left, db.DefaultMaxInSize)
 		rows, err := db.GetEngine(ctx).
 			In("issue_id", issuesIDs[:limit]).
 			Rows(new(PullRequest))

@@ -375,10 +354,7 @@ func (issues IssueList) LoadAttachments(ctx context.Context) (err error) {
 	issuesIDs := issues.getIssueIDs()
 	left := len(issuesIDs)
 	for left > 0 {
-		limit := db.DefaultMaxInSize
-		if left < limit {
-			limit = left
-		}
+		limit := min(left, db.DefaultMaxInSize)
 		rows, err := db.GetEngine(ctx).
 			In("issue_id", issuesIDs[:limit]).
 			Rows(new(repo_model.Attachment))

@@ -420,10 +396,7 @@ func (issues IssueList) loadComments(ctx context.Context, cond builder.Cond) (er
 	issuesIDs := issues.getIssueIDs()
 	left := len(issuesIDs)
 	for left > 0 {
-		limit := db.DefaultMaxInSize
-		if left < limit {
-			limit = left
-		}
+		limit := min(left, db.DefaultMaxInSize)
 		rows, err := db.GetEngine(ctx).Table("comment").
 			Join("INNER", "issue", "issue.id = comment.issue_id").
 			In("issue.id", issuesIDs[:limit]).

@@ -486,10 +459,7 @@ func (issues IssueList) loadTotalTrackedTimes(ctx context.Context) (err error) {
 
 	left := len(ids)
 	for left > 0 {
-		limit := db.DefaultMaxInSize
-		if left < limit {
-			limit = left
-		}
+		limit := min(left, db.DefaultMaxInSize)
 
 		// select issue_id, sum(time) from tracked_time where issue_id in (<issue ids in current page>) group by issue_id
 		rows, err := db.GetEngine(ctx).Table("tracked_time").
@@ -94,10 +94,7 @@ func GetIssueStats(ctx context.Context, opts *IssuesOptions) (*IssueStats, error
 	// ids in a temporary table and join from them.
 	accum := &IssueStats{}
 	for i := 0; i < len(opts.IssueIDs); {
-		chunk := i + MaxQueryParameters
-		if chunk > len(opts.IssueIDs) {
-			chunk = len(opts.IssueIDs)
-		}
+		chunk := min(i+MaxQueryParameters, len(opts.IssueIDs))
 		stats, err := getIssueStatsChunk(ctx, opts, opts.IssueIDs[i:chunk])
 		if err != nil {
 			return nil, err
@@ -5,6 +5,7 @@ package issues_test
 
 import (
 	"fmt"
+	"slices"
 	"sort"
 	"sync"
 	"testing"
@@ -311,7 +312,7 @@ func TestIssue_ResolveMentions(t *testing.T) {
 		for i, user := range resolved {
 			ids[i] = user.ID
 		}
-		sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
+		slices.Sort(ids)
 		assert.Equal(t, expected, ids)
 	}
 
@@ -338,7 +339,7 @@ func TestResourceIndex(t *testing.T) {
 	require.NoError(t, err)
 
 	var wg sync.WaitGroup
-	for i := 0; i < 100; i++ {
+	for i := range 100 {
 		wg.Add(1)
 		t.Run(fmt.Sprintf("issue %d", i+1), func(t *testing.T) {
 			t.Parallel()
@@ -369,7 +370,7 @@ func TestCorrectIssueStats(t *testing.T) {
 	issueAmount := issues_model.MaxQueryParameters + 10
 
 	var wg sync.WaitGroup
-	for i := 0; i < issueAmount; i++ {
+	for i := range issueAmount {
 		wg.Add(1)
 		go func(i int) {
 			testInsertIssue(t, fmt.Sprintf("Issue %d", i+1), "Bugs are nasty", 0)
@@ -244,7 +244,7 @@ func UpdateIssueAttachments(ctx context.Context, issue *Issue, uuids []string) (
 	if err != nil {
 		return fmt.Errorf("FindRepoAttachmentsByUUID[uuids=%q,repoID=%d]: %w", uuids, issue.RepoID, err)
 	}
-	for i := 0; i < len(attachments); i++ {
+	for i := range attachments {
 		attachments[i].IssueID = issue.ID
 		if err := repo_model.UpdateAttachment(ctx, attachments[i]); err != nil {
 			return fmt.Errorf("update attachment [id: %d]: %w", attachments[i].ID, err)
@@ -20,7 +20,7 @@ type ReviewList []*Review
 // LoadReviewers loads reviewers
 func (reviews ReviewList) LoadReviewers(ctx context.Context) error {
 	reviewerIDs := make([]int64, len(reviews))
-	for i := 0; i < len(reviews); i++ {
+	for i := range reviews {
 		reviewerIDs[i] = reviews[i].ReviewerID
 	}
 	reviewers, err := user_model.GetPossibleUserByIDs(ctx, reviewerIDs)
@@ -350,10 +350,7 @@ func GetIssueTotalTrackedTime(ctx context.Context, opts *IssuesOptions, isClosed
 	// we get the statistics in smaller chunks and get accumulates
 	var accum int64
 	for i := 0; i < len(opts.IssueIDs); {
-		chunk := i + MaxQueryParameters
-		if chunk > len(opts.IssueIDs) {
-			chunk = len(opts.IssueIDs)
-		}
+		chunk := min(i+MaxQueryParameters, len(opts.IssueIDs))
 		time, err := getIssueTotalTrackedTimeChunk(ctx, opts, isClosed, opts.IssueIDs[i:chunk])
 		if err != nil {
 			return 0, err
@@ -6,6 +6,7 @@ package access
 import (
 	"context"
 	"fmt"
+	"strings"
 
 	actions_model "forgejo.org/models/actions"
 	"forgejo.org/models/db"
@@ -115,7 +116,8 @@ func (p *Permission) CanWriteIssuesOrPulls(isPull bool) bool {
 }
 
 func (p *Permission) LogString() string {
-	format := "<Permission AccessMode=%s, %d Units, %d UnitsMode(s): [ "
+	var format strings.Builder
+	format.WriteString("<Permission AccessMode=%s, %d Units, %d UnitsMode(s): [ ")
 	args := []any{p.AccessMode.String(), len(p.Units), len(p.UnitsMode)}
 
 	for i, unit := range p.Units {
@@ -127,15 +129,15 @@ func (p *Permission) LogString() string {
 			config = err.Error()
 		}
 	}
-		format += "\nUnits[%d]: ID: %d RepoID: %d Type: %s Config: %s"
+		format.WriteString("\nUnits[%d]: ID: %d RepoID: %d Type: %s Config: %s")
 		args = append(args, i, unit.ID, unit.RepoID, unit.Type.LogString(), config)
 	}
 	for key, value := range p.UnitsMode {
-		format += "\nUnitMode[%-v]: %-v"
+		format.WriteString("\nUnitMode[%-v]: %-v")
 		args = append(args, key.LogString(), value.LogString())
 	}
-	format += " ]>"
-	return fmt.Sprintf(format, args...)
+	format.WriteString(" ]>")
+	return fmt.Sprintf(format.String(), args...)
 }
 
 func GetActionRepoPermission(ctx context.Context, repo *repo_model.Repository, task *actions_model.ActionTask) (Permission, error) {
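Background: accumulating the format string in a strings.Builder avoids re-allocating the whole string on every += concatenation; the printf arguments are unchanged. A minimal sketch of the pattern:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	var format strings.Builder
	args := []any{2} // illustrative values

	format.WriteString("<Permission %d Units: [ ")
	for i := range 2 {
		// Each WriteString appends to the builder's buffer in place.
		format.WriteString("\nUnit[%d]")
		args = append(args, i)
	}
	format.WriteString(" ]>")

	out := fmt.Sprintf(format.String(), args...)
	fmt.Println(out)
}
```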
@@ -164,7 +164,7 @@ func Test_NewColumn(t *testing.T) {
 	require.NoError(t, err)
 	assert.Len(t, columns, 3)
 
-	for i := 0; i < maxProjectColumns-3; i++ {
+	for i := range maxProjectColumns - 3 {
 		err := NewColumn(db.DefaultContext, &Column{
 			Title:     fmt.Sprintf("column-%d", i+4),
 			ProjectID: project1.ID,
@@ -6,6 +6,7 @@ package pull
 import (
 	"context"
 	"fmt"
+	"maps"
 
 	"forgejo.org/models/db"
 	"forgejo.org/modules/log"
@@ -100,9 +101,7 @@ func mergeFiles(oldFiles, newFiles map[string]ViewedState) map[string]ViewedStat
 		return oldFiles
 	}
 
-	for file, viewed := range newFiles {
-		oldFiles[file] = viewed
-	}
+	maps.Copy(oldFiles, newFiles)
 	return oldFiles
 }
 
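Background: maps.Copy (Go 1.21) replaces the manual key-by-key copy loop. A minimal sketch with illustrative values:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	dst := map[string]string{"mode": "document"}
	src := map[string]string{"user": "gusted", "repo": "forgejo"} // illustrative values

	// Before: for k, v := range src { dst[k] = v }
	maps.Copy(dst, src)
	fmt.Println(dst)
}
```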
@@ -9,6 +9,7 @@ import (
 	"errors"
 	"fmt"
 	"html/template"
+	"maps"
 	"net"
 	"net/url"
 	"path/filepath"
@@ -543,9 +544,7 @@ func (repo *Repository) ComposeMetas(ctx context.Context) map[string]string {
 func (repo *Repository) ComposeDocumentMetas(ctx context.Context) map[string]string {
 	if len(repo.DocumentRenderingMetas) == 0 {
 		metas := map[string]string{}
-		for k, v := range repo.ComposeMetas(ctx) {
-			metas[k] = v
-		}
+		maps.Copy(metas, repo.ComposeMetas(ctx))
 		metas["mode"] = "document"
 		repo.DocumentRenderingMetas = metas
 	}
@@ -786,8 +785,8 @@ func GetRepositoryByName(ctx context.Context, ownerID int64, name string) (*Repo
 
 // getRepositoryURLPathSegments returns segments (owner, reponame) extracted from a url
 func getRepositoryURLPathSegments(repoURL string) []string {
-	if strings.HasPrefix(repoURL, setting.AppURL) {
-		return strings.Split(strings.TrimPrefix(repoURL, setting.AppURL), "/")
+	if after, ok := strings.CutPrefix(repoURL, setting.AppURL); ok {
+		return strings.Split(after, "/")
 	}
 
 	sshURLVariants := [4]string{
@@ -798,8 +797,8 @@ func getRepositoryURLPathSegments(repoURL string) []string {
 	}
 
 	for _, sshURL := range sshURLVariants {
-		if strings.HasPrefix(repoURL, sshURL) {
-			return strings.Split(strings.TrimPrefix(repoURL, sshURL), "/")
+		if after, ok := strings.CutPrefix(repoURL, sshURL); ok {
+			return strings.Split(after, "/")
 		}
 	}
 
@@ -401,7 +401,7 @@ func SearchRepositoryCondition(opts *SearchRepoOptions) builder.Cond {
 	if opts.Keyword != "" {
 		// separate keyword
 		subQueryCond := builder.NewCond()
-		for _, v := range strings.Split(opts.Keyword, ",") {
+		for v := range strings.SplitSeq(opts.Keyword, ",") {
 			if opts.TopicOnly {
 				subQueryCond = subQueryCond.Or(builder.Eq{"topic.name": strings.ToLower(v)})
 			} else {
@@ -416,7 +416,7 @@ func SearchRepositoryCondition(opts *SearchRepoOptions) builder.Cond {
 		keywordCond := builder.In("id", subQuery)
 		if !opts.TopicOnly {
 			likes := builder.NewCond()
-			for _, v := range strings.Split(opts.Keyword, ",") {
+			for v := range strings.SplitSeq(opts.Keyword, ",") {
 				likes = likes.Or(builder.Like{"lower_name", strings.ToLower(v)})
 
 				// If the string looks like "org/repo", match against that pattern too
@@ -237,10 +237,8 @@ func (cfg *ActionsConfig) IsWorkflowDisabled(file string) bool {
 }
 
 func (cfg *ActionsConfig) DisableWorkflow(file string) {
-	for _, workflow := range cfg.DisabledWorkflows {
-		if file == workflow {
-			return
-		}
+	if slices.Contains(cfg.DisabledWorkflows, file) {
+		return
 	}
 
 	cfg.DisabledWorkflows = append(cfg.DisabledWorkflows, file)
@@ -117,7 +117,7 @@ func DeleteUploads(ctx context.Context, uploads ...*Upload) (err error) {
 	defer committer.Close()
 
 	ids := make([]int64, len(uploads))
-	for i := 0; i < len(uploads); i++ {
+	for i := range uploads {
 		ids[i] = uploads[i].ID
 	}
 	if err = db.DeleteByIDs[Upload](ctx, ids...); err != nil {
@@ -248,22 +248,12 @@ func LoadUnitConfig() error {
 
 // UnitGlobalDisabled checks if unit type is global disabled
 func (u Type) UnitGlobalDisabled() bool {
-	for _, ud := range DisabledRepoUnitsGet() {
-		if u == ud {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(DisabledRepoUnitsGet(), u)
 }
 
 // CanBeDefault checks if the unit type can be a default repo unit
 func (u *Type) CanBeDefault() bool {
-	for _, nadU := range NotAllowedDefaultRepoUnits {
-		if *u == nadU {
-			return false
-		}
-	}
-	return true
+	return !slices.Contains(NotAllowedDefaultRepoUnits, *u)
 }
 
 // Unit is a section of one repository
||||||
|
|
@ -151,8 +151,8 @@ func (l *loader) buildFixtureFile(fixturePath string) (*fixtureFile, error) {
|
||||||
switch v := value.(type) {
|
switch v := value.(type) {
|
||||||
case string:
|
case string:
|
||||||
// Try to decode hex.
|
// Try to decode hex.
|
||||||
if strings.HasPrefix(v, "0x") {
|
if after, ok := strings.CutPrefix(v, "0x"); ok {
|
||||||
value, err = hex.DecodeString(strings.TrimPrefix(v, "0x"))
|
value, err = hex.DecodeString(after)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@@ -102,13 +102,13 @@ func NewMockWebServer(t *testing.T, liveServerBaseURL, testDataDir string, liveM
 	// parse back the fixture file into a series of HTTP headers followed by response body
 	lines := strings.Split(stringFixture, "\n")
 	for idx, line := range lines {
-		colonIndex := strings.Index(line, ": ")
-		if colonIndex != -1 {
+		before, after, ok := strings.Cut(line, ": ")
+		if ok {
 			// Because we modified the body with ReplaceAll() above, we need to
 			// remove Content-Length. w.Write() should add it back.
-			header := line[0:colonIndex]
+			header := before
 			if !strings.EqualFold(header, "Content-Length") {
-				w.Header().Set(line[0:colonIndex], line[colonIndex+2:])
+				w.Header().Set(before, after)
 			}
 		} else {
 			// we reached the end of the headers (empty line), so what follows is the body
@@ -9,7 +9,7 @@ import (
 )
 
 func fieldByName(v reflect.Value, field string) reflect.Value {
-	if v.Kind() == reflect.Ptr {
+	if v.Kind() == reflect.Pointer {
 		v = v.Elem()
 	}
 	f := v.FieldByName(field)
@@ -108,7 +108,7 @@ func (u *User) IsUploadAvatarChanged(data []byte) bool {
 	if !u.UseCustomAvatar || len(u.Avatar) == 0 {
 		return true
 	}
-	avatarID := fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%d-%x", u.ID, md5.Sum(data)))))
+	avatarID := fmt.Sprintf("%x", md5.Sum(fmt.Appendf(nil, "%d-%x", u.ID, md5.Sum(data))))
 	return u.Avatar != avatarID
 }
 
@@ -5,6 +5,7 @@ package user_test
 
 import (
 	"fmt"
+	"slices"
 	"testing"
 
 	"forgejo.org/models/db"
@@ -77,12 +78,7 @@ func TestListEmails(t *testing.T) {
 	assert.Greater(t, count, int64(5))
 
 	contains := func(match func(s *user_model.SearchEmailResult) bool) bool {
-		for _, v := range emails {
-			if match(v) {
-				return true
-			}
-		}
-		return false
+		return slices.ContainsFunc(emails, match)
 	}
 
 	assert.True(t, contains(func(s *user_model.SearchEmailResult) bool { return s.UID == 18 }))
@@ -87,7 +87,7 @@ func newUserData(user *User) UserData {
 // (e.g. FieldName -> field_name) corresponding to UserData struct fields.
 var userDataColumnNames = sync.OnceValue(func() []string {
 	mapper := new(names.GonicMapper)
-	udType := reflect.TypeOf(UserData{})
+	udType := reflect.TypeFor[UserData]()
 	columnNames := make([]string, 0, udType.NumField())
 	for i := 0; i < udType.NumField(); i++ {
 		columnNames = append(columnNames, mapper.Obj2Table(udType.Field(i).Name))
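Background: reflect.TypeFor[T]() (Go 1.22) yields the reflect.Type of T directly, without constructing a throwaway value the way reflect.TypeOf(UserData{}) does. A minimal sketch with a hypothetical struct:

```go
package main

import (
	"fmt"
	"reflect"
)

// userData is illustrative only, standing in for a struct whose
// field names get mapped to column names.
type userData struct {
	FieldName string
	OtherName int
}

func main() {
	// Before: reflect.TypeOf(userData{})
	udType := reflect.TypeFor[userData]()
	for i := range udType.NumField() {
		fmt.Println(udType.Field(i).Name)
	}
}
```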
@@ -1243,8 +1243,8 @@ func GetUserByEmail(ctx context.Context, email string) (*User, error) {
 	}
 
 	// Finally, if email address is the protected email address:
-	if strings.HasSuffix(email, fmt.Sprintf("@%s", setting.Service.NoReplyAddress)) {
-		username := strings.TrimSuffix(email, fmt.Sprintf("@%s", setting.Service.NoReplyAddress))
+	if before, ok := strings.CutSuffix(email, fmt.Sprintf("@%s", setting.Service.NoReplyAddress)); ok {
+		username := before
 		user := &User{}
 		has, err := db.GetEngine(ctx).Where("lower_name=?", username).Get(user)
 		if err != nil {
@@ -273,9 +273,9 @@ func TestHashPasswordDeterministic(t *testing.T) {
 	b := make([]byte, 16)
 	u := &user_model.User{}
 	algos := hash.RecommendedHashAlgorithms
-	for j := 0; j < len(algos); j++ {
+	for j := range algos {
 		u.PasswdHashAlgo = algos[j]
-		for i := 0; i < 50; i++ {
+		for range 50 {
 			// generate a random password
 			rand.Read(b)
 			pass := string(b)
@@ -429,7 +429,7 @@ func CreateWebhooks(ctx context.Context, ws []*Webhook) error {
 	if len(ws) == 0 {
 		return nil
 	}
-	for i := 0; i < len(ws); i++ {
+	for i := range ws {
 		ws[i].Type = strings.TrimSpace(ws[i].Type)
 	}
 	return db.Insert(ctx, ws)
@@ -7,6 +7,7 @@ import (
 	"bytes"
 	"fmt"
 	"io"
+	"slices"
 	"strings"
 
 	actions_model "forgejo.org/models/actions"
@@ -609,11 +610,8 @@ func matchPullRequestReviewEvent(prPayload *api.PullRequestPayload, evt *jobpars
 
 	matched := false
 	for _, val := range vals {
-		for _, action := range actions {
-			if glob.MustCompile(val, '/').Match(action) {
-				matched = true
-				break
-			}
+		if slices.ContainsFunc(actions, glob.MustCompile(val, '/').Match) {
+			matched = true
 		}
 		if matched {
 			break
@@ -658,11 +656,8 @@ func matchPullRequestReviewCommentEvent(prPayload *api.PullRequestPayload, evt *

         matched := false
         for _, val := range vals {
-            for _, action := range actions {
-                if glob.MustCompile(val, '/').Match(action) {
-                    matched = true
-                    break
-                }
+            if slices.ContainsFunc(actions, glob.MustCompile(val, '/').Match) {
+                matched = true
             }
             if matched {
                 break

@@ -101,7 +101,7 @@ func Generate(n int) (string, error) {
     buffer := make([]byte, n)
     max := big.NewInt(int64(len(validChars)))
     for {
-        for j := 0; j < n; j++ {
+        for j := range n {
             rnd, err := rand.Int(rand.Reader, max)
             if err != nil {
                 return "", err

@@ -51,7 +51,7 @@ func TestComplexity_Generate(t *testing.T) {

     test := func(t *testing.T, modes []string) {
         testComplextity(modes)
-        for i := 0; i < maxCount; i++ {
+        for range maxCount {
             pwd, err := Generate(pwdLen)
             require.NoError(t, err)
             assert.Len(t, pwd, pwdLen)

@@ -101,7 +101,7 @@ func (c *Client) CheckPassword(pw string, padding bool) (int, error) {
     }
     defer resp.Body.Close()

-    for _, pair := range strings.Split(string(body), "\n") {
+    for pair := range strings.SplitSeq(string(body), "\n") {
         parts := strings.Split(pair, ":")
         if len(parts) != 2 {
             continue
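strings.SplitSeq and bytes.SplitSeq (Go 1.24+) return an iterator instead of allocating a slice of all substrings, which is why the loops above change from `for _, x := range parts` to `for x := range parts`. strings.FieldsSeq, used further below, is the analogous iterator form of strings.Fields. A minimal sketch with an illustrative payload:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        data := "a:1\nb:2\nc:3" // illustrative payload
        for pair := range strings.SplitSeq(data, "\n") {
            key, value, ok := strings.Cut(pair, ":")
            if !ok {
                continue
            }
            fmt.Println(key, "=>", value)
        }
    }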
@@ -24,8 +24,8 @@ func drawBlock(img *image.Paletted, x, y, size, angle int, points []int) {
         rotate(points, m, m, angle)
     }

-    for i := 0; i < size; i++ {
-        for j := 0; j < size; j++ {
+    for i := range size {
+        for j := range size {
             if pointInPolygon(i, j, points) {
                 img.SetColorIndex(x+i, y+j, 1)
             }

@@ -134,7 +134,7 @@ func drawBlocks(p *image.Paletted, size int, c, b1, b2 blockFunc, b1Angle, b2Ang

     // then we make it left-right mirror, so we didn't draw 3/6/9 before
     for x := 0; x < size/2; x++ {
-        for y := 0; y < size; y++ {
+        for y := range size {
             p.SetColorIndex(size-x, y, p.ColorIndexAt(x, y))
         }
     }

@@ -164,7 +164,7 @@ func DetectEncoding(content []byte) (string, error) {
         }
         times := 1024 / len(content)
         detectContent = make([]byte, 0, times*len(content))
-        for i := 0; i < times; i++ {
+        for range times {
             detectContent = append(detectContent, content...)
         }
     } else {

@@ -243,7 +243,7 @@ func stringMustEndWith(t *testing.T, expected, value string) {
 func TestToUTF8WithFallbackReader(t *testing.T) {
     resetDefaultCharsetsOrder()

-    for testLen := 0; testLen < 2048; testLen++ {
+    for testLen := range 2048 {
         pattern := " test { () }\n"
         input := ""
         for len(input) < testLen {

@@ -6,6 +6,7 @@ package forgefed
 import (
     "fmt"
     "net/url"
+    "slices"
     "strconv"
     "strings"

@@ -107,12 +108,7 @@ func newActorID(uri string) (ActorID, error) {
 }

 func containsEmptyString(ar []string) bool {
-    for _, elem := range ar {
-        if elem == "" {
-            return true
-        }
-    }
-    return false
+    return slices.Contains(ar, "")
 }

 func removeEmptyStrings(ls []string) []string {
@@ -88,7 +88,7 @@ func ToRepository(it ap.Item) (*Repository, error) {
         return (*Repository)(unsafe.Pointer(&i)), nil
     default:
         // NOTE(marius): this is an ugly way of dealing with the interface conversion error: types from different scopes
-        typ := reflect.TypeOf(new(Repository))
+        typ := reflect.TypeFor[*Repository]()
         if i, ok := reflect.ValueOf(it).Convert(typ).Interface().(*Repository); ok {
             return i, nil
         }

@@ -269,8 +269,8 @@ func NewSearchCommitsOptions(searchString string, forAllRefs bool) SearchCommits
     var keywords, authors, committers []string
     var after, before string

-    fields := strings.Fields(searchString)
-    for _, k := range fields {
+    fields := strings.FieldsSeq(searchString)
+    for k := range fields {
         switch {
         case strings.HasPrefix(k, "author:"):
             authors = append(authors, strings.TrimPrefix(k, "author:"))

@@ -7,6 +7,7 @@ import (
     "context"
     "fmt"
     "io"
+    "maps"
     "path"
     "sort"

@@ -45,9 +46,7 @@ func (tes Entries) GetCommitsInfo(ctx context.Context, commit *Commit, treePath
             return nil, nil, err
         }

-        for pth, found := range commits {
-            revs[pth] = found
-        }
+        maps.Copy(revs, commits)
         }
     } else {
         sort.Strings(entryPaths)
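maps.Copy (Go 1.21+) copies every key/value pair from one map into another, replacing the explicit range-and-assign loop removed above. A small sketch with illustrative maps:

    package main

    import (
        "fmt"
        "maps"
    )

    func main() {
        dst := map[string]bool{"a.txt": true}
        src := map[string]bool{"b.txt": false, "c.txt": true}
        maps.Copy(dst, src) // equivalent to: for k, v := range src { dst[k] = v }
        fmt.Println(len(dst)) // 3
    }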
@@ -75,9 +75,9 @@ func (f Format) Parser(r io.Reader) *Parser {
 // hexEscaped produces hex-escaped characters from a string. For example, "\n\0"
 // would turn into "%0a%00".
 func (f Format) hexEscaped(delim []byte) string {
-    escaped := ""
-    for i := 0; i < len(delim); i++ {
-        escaped += "%" + hex.EncodeToString([]byte{delim[i]})
+    var escaped strings.Builder
+    for i := range delim {
+        escaped.WriteString("%" + hex.EncodeToString([]byte{delim[i]}))
     }
-    return escaped
+    return escaped.String()
 }
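Repeated string concatenation with += reallocates the string on every iteration; strings.Builder accumulates into a growable buffer and converts to a string once at the end. A hedged, simplified sketch of the hex-escaping idea shown above (not the exact function from the repository):

    package main

    import (
        "encoding/hex"
        "fmt"
        "strings"
    )

    func hexEscape(delim []byte) string {
        var escaped strings.Builder
        for i := range delim {
            escaped.WriteString("%" + hex.EncodeToString([]byte{delim[i]}))
        }
        return escaped.String()
    }

    func main() {
        fmt.Println(hexEscape([]byte("\n\x00"))) // %0a%00
    }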
@@ -9,6 +9,7 @@ import (
     "os"
     "path"
     "path/filepath"
+    "slices"
     "strings"

     "forgejo.org/modules/log"

@@ -27,12 +28,7 @@ var ErrNotValidHook = errors.New("not a valid Git hook")

 // IsValidHookName returns true if given name is a valid Git hook.
 func IsValidHookName(name string) bool {
-    for _, hn := range hookNames {
-        if hn == name {
-            return true
-        }
-    }
-    return false
+    return slices.Contains(hookNames, name)
 }

 // Hook represents a Git hook.

@@ -21,7 +21,7 @@ type Cache interface {
 }

 func getCacheKey(repoPath, commitID, entryPath string) string {
-    hashBytes := sha256.Sum256([]byte(fmt.Sprintf("%s:%s:%s", repoPath, commitID, entryPath)))
+    hashBytes := sha256.Sum256(fmt.Appendf(nil, "%s:%s:%s", repoPath, commitID, entryPath))
     return fmt.Sprintf("last_commit:%x", hashBytes)
 }
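fmt.Appendf (Go 1.19+) formats directly into a byte slice, so `[]byte(fmt.Sprintf(...))` becomes a single call without the intermediate string. A minimal sketch with illustrative values:

    package main

    import (
        "crypto/sha256"
        "fmt"
    )

    func main() {
        // Format straight into a []byte; passing nil starts a fresh slice.
        key := fmt.Appendf(nil, "%s:%s:%s", "repo", "commit", "path") // illustrative values
        sum := sha256.Sum256(key)
        fmt.Printf("last_commit:%x\n", sum)
    }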
@@ -346,10 +346,7 @@ func WalkGitLog(ctx context.Context, repo *Repository, head *Commit, treepath st

     results := make([]string, len(paths))
     remaining := len(paths)
-    nextRestart := (len(paths) * 3) / 4
-    if nextRestart > 70 {
-        nextRestart = 70
-    }
+    nextRestart := min((len(paths)*3)/4, 70)
     lastEmptyParent := head.ID.String()
     commitSinceLastEmptyParent := uint64(0)
     commitSinceNextRestart := uint64(0)
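The built-in min and max functions (Go 1.21+) replace the small if-statement previously used to clamp a value, as in the hunk above. A tiny sketch:

    package main

    import "fmt"

    func main() {
        paths := make([]string, 120) // illustrative length
        nextRestart := min((len(paths)*3)/4, 70)
        fmt.Println(nextRestart) // 70, because 90 is clamped down
    }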
@@ -8,6 +8,7 @@ import (
     "context"
     "io"
     "os"
+    "strings"

     "forgejo.org/modules/log"
 )

@@ -33,7 +34,7 @@ func GetNote(ctx context.Context, repo *Repository, commitID string) (*Note, err
         return nil, err
     }

-    path := ""
+    var path strings.Builder

     tree := &notes.Tree
     log.Trace("Found tree with ID %q while searching for git note corresponding to the commit %q", tree.ID, commitID)

@@ -43,12 +44,12 @@ func GetNote(ctx context.Context, repo *Repository, commitID string) (*Note, err
     for len(commitID) > 2 {
         entry, err = tree.GetTreeEntryByPath(commitID)
         if err == nil {
-            path += commitID
+            path.WriteString(commitID)
             break
         }
         if IsErrNotExist(err) {
             tree, err = tree.SubTree(commitID[0:2])
-            path += commitID[0:2] + "/"
+            path.WriteString(commitID[0:2] + "/")
             commitID = commitID[2:]
         }
         if err != nil {

@@ -80,9 +81,9 @@ func GetNote(ctx context.Context, repo *Repository, commitID string) (*Note, err
     _ = dataRc.Close()
     closed = true

-    lastCommit, err := repo.getCommitByPathWithID(notes.ID, path)
+    lastCommit, err := repo.getCommitByPathWithID(notes.ID, path.String())
     if err != nil {
-        log.Error("Unable to get the commit for the path %q. Error: %v", path, err)
+        log.Error("Unable to get the commit for the path %q. Error: %v", path.String(), err)
         return nil, err
     }

@@ -33,16 +33,16 @@ func parseTreeEntries(data []byte, ptree *Tree) ([]*TreeEntry, error) {
             posEnd += pos
         }
         line := data[pos:posEnd]
-        posTab := bytes.IndexByte(line, '\t')
-        if posTab == -1 {
+        before, after, ok := bytes.Cut(line, []byte{'\t'})
+        if !ok {
             return nil, fmt.Errorf("invalid ls-tree output (no tab): %q", line)
         }

         entry := new(TreeEntry)
         entry.ptree = ptree

-        entryAttrs := line[:posTab]
-        entryName := line[posTab+1:]
+        entryAttrs := before
+        entryName := after

         entryMode, entryAttrs, _ := bytes.Cut(entryAttrs, sepSpace)
         _ /* entryType */, entryAttrs, _ = bytes.Cut(entryAttrs, sepSpace) // the type is not used, the mode is enough to determine the type
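bytes.Cut and strings.Cut split a value around the first occurrence of a separator and report whether it was found, replacing IndexByte plus manual slicing. A minimal sketch over an ls-tree-like line (the line content is made up):

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        line := []byte("100644 blob 4b825dc\tREADME.md") // illustrative line
        attrs, name, ok := bytes.Cut(line, []byte{'\t'})
        if !ok {
            fmt.Println("no tab found")
            return
        }
        fmt.Printf("attrs=%q name=%q\n", attrs, name)
    }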
@@ -52,7 +52,7 @@ func NewFromMap(o *map[string]string) Interface {

 func (o *gitPushOptions) ReadEnv() Interface {
     if pushCount, err := strconv.Atoi(os.Getenv(EnvCount)); err == nil {
-        for idx := 0; idx < pushCount; idx++ {
+        for idx := range pushCount {
             _ = o.Parse(os.Getenv(fmt.Sprintf(EnvFormat, idx)))
         }
     }

@@ -105,8 +105,8 @@ func (ref RefName) IsFor() bool {
 }

 func (ref RefName) nameWithoutPrefix(prefix string) string {
-    if strings.HasPrefix(string(ref), prefix) {
-        return strings.TrimPrefix(string(ref), prefix)
+    if after, ok := strings.CutPrefix(string(ref), prefix); ok {
+        return after
     }
     return ""
 }

@@ -46,9 +46,9 @@ func (repo *Repository) parsePrettyFormatLogToList(logs []byte) ([]*Commit, erro
         return commits, nil
     }

-    parts := bytes.Split(logs, []byte{'\n'})
+    parts := bytes.SplitSeq(logs, []byte{'\n'})

-    for _, commitID := range parts {
+    for commitID := range parts {
         commit, err := repo.GetCommit(string(commitID))
         if err != nil {
             return nil, err

@@ -96,8 +96,8 @@ func (ca GitAttribute) String() string {
 // sometimes used within gitlab-language: https://docs.gitlab.com/ee/user/project/highlighting.html#override-syntax-highlighting-for-a-file-type
 func (ca GitAttribute) Prefix() string {
     s := ca.String()
-    if i := strings.IndexByte(s, '?'); i >= 0 {
-        return s[:i]
+    if before, _, ok := strings.Cut(s, "?"); ok {
+        return before
     }
     return s
 }

@@ -95,7 +95,7 @@ func (repo *Repository) LsFiles(filenames ...string) ([]string, error) {
         return nil, err
     }
     filelist := make([]string, 0, len(filenames))
-    for _, line := range bytes.Split(res, []byte{'\000'}) {
+    for line := range bytes.SplitSeq(res, []byte{'\000'}) {
         filelist = append(filelist, string(line))
     }

@@ -42,8 +42,8 @@ func (repo *Repository) GetTagNameBySHA(sha string) (string, error) {
         return "", err
     }

-    tagRefs := strings.Split(stdout, "\n")
-    for _, tagRef := range tagRefs {
+    tagRefs := strings.SplitSeq(stdout, "\n")
+    for tagRef := range tagRefs {
         if len(strings.TrimSpace(tagRef)) > 0 {
             fields := strings.Fields(tagRef)
             if strings.HasPrefix(fields[0], sha) && strings.HasPrefix(fields[1], TagPrefix) {

@@ -65,7 +65,7 @@ func (repo *Repository) GetTagID(name string) (string, error) {
         return "", err
     }
     // Make sure exact match is used: "v1" != "release/v1"
-    for _, line := range strings.Split(stdout, "\n") {
+    for line := range strings.SplitSeq(stdout, "\n") {
         fields := strings.Fields(line)
         if len(fields) == 2 && fields[1] == "refs/tags/"+name {
             return fields[0], nil
@@ -50,20 +50,20 @@ l:
     switch {
     case eol > 0:
         line := data[nextline : nextline+eol]
-        spacepos := bytes.IndexByte(line, ' ')
-        reftype := line[:spacepos]
+        before, after, _ := bytes.Cut(line, []byte{' '})
+        reftype := before
         switch string(reftype) {
         case "object":
-            id, err := NewIDFromString(string(line[spacepos+1:]))
+            id, err := NewIDFromString(string(after))
             if err != nil {
                 return nil, err
             }
             tag.Object = id
         case "type":
             // A commit can have one or more parents
-            tag.Type = string(line[spacepos+1:])
+            tag.Type = string(after)
         case "tagger":
-            tag.Tagger = parseSignatureFromCommitLine(util.UnsafeBytesToString(line[spacepos+1:]))
+            tag.Tagger = parseSignatureFromCommitLine(util.UnsafeBytesToString(after))
         }
         nextline += eol + 1
     case eol == 0:

@@ -170,7 +170,7 @@ func (repo *Repository) LsTree(ref string, filenames ...string) ([]string, error
         return nil, err
     }
     filelist := make([]string, 0, len(filenames))
-    for _, line := range bytes.Split(res, []byte{'\000'}) {
+    for line := range bytes.SplitSeq(res, []byte{'\000'}) {
         filelist = append(filelist, string(line))
     }

@@ -171,7 +171,7 @@ func (te *TreeEntry) FollowLinks() (*TreeEntry, string, error) {
     }
     entry := te
     entryLink := ""
-    for i := 0; i < 999; i++ {
+    for range 999 {
         if entry.IsLink() {
             next, link, err := entry.FollowLink()
             entryLink = link

@@ -20,7 +20,7 @@ func TestSubTree_Issue29101(t *testing.T) {
     require.NoError(t, err)

     // old code could produce a different error if called multiple times
-    for i := 0; i < 10; i++ {
+    for range 10 {
         _, err = commit.SubTree("file1.txt")
         require.Error(t, err)
         assert.True(t, IsErrNotExist(err))

@@ -6,6 +6,7 @@ package hostmatcher
 import (
     "net"
     "path/filepath"
+    "slices"
     "strings"
 )

@@ -38,7 +39,7 @@ func isBuiltin(s string) bool {
 // ParseHostMatchList parses the host list HostMatchList
 func ParseHostMatchList(settingKeyHint, hostList string) *HostMatchList {
     hl := &HostMatchList{SettingKeyHint: settingKeyHint, SettingValue: hostList}
-    for _, s := range strings.Split(hostList, ",") {
+    for s := range strings.SplitSeq(hostList, ",") {
         s = strings.ToLower(strings.TrimSpace(s))
         if s == "" {
             continue

@@ -61,7 +62,7 @@ func ParseSimpleMatchList(settingKeyHint, matchList string) *HostMatchList {
         SettingKeyHint: settingKeyHint,
         SettingValue: matchList,
     }
-    for _, s := range strings.Split(matchList, ",") {
+    for s := range strings.SplitSeq(matchList, ",") {
         s = strings.ToLower(strings.TrimSpace(s))
         if s == "" {
             continue

@@ -98,10 +99,8 @@ func (hl *HostMatchList) checkPattern(host string) bool {
 }

 func (hl *HostMatchList) checkIP(ip net.IP) bool {
-    for _, pattern := range hl.patterns {
-        if pattern == "*" {
-            return true
-        }
+    if slices.Contains(hl.patterns, "*") {
+        return true
     }
     for _, builtin := range hl.builtins {
         switch builtin {
@@ -59,7 +59,7 @@ func HandleGenericETagCache(req *http.Request, w http.ResponseWriter, etag strin
 func checkIfNoneMatchIsValid(req *http.Request, etag string) bool {
     ifNoneMatch := req.Header.Get("If-None-Match")
     if len(ifNoneMatch) > 0 {
-        for _, item := range strings.Split(ifNoneMatch, ",") {
+        for item := range strings.SplitSeq(ifNoneMatch, ",") {
             item = strings.TrimPrefix(strings.TrimSpace(item), "W/") // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag#directives
             if item == etag {
                 return true

@@ -8,6 +8,7 @@ import (
     "errors"
     "fmt"
     "io"
+    "maps"
     "net/http"
     "net/url"
     "path"

@@ -86,9 +87,7 @@ func ServeSetHeaders(w http.ResponseWriter, opts *ServeHeaderOptions) {
     }

     if opts.AdditionalHeaders != nil {
-        for k, v := range opts.AdditionalHeaders {
-            header[k] = v
-        }
+        maps.Copy(header, opts.AdditionalHeaders)
     }
 }

@@ -129,8 +129,8 @@ func nonGenesisChanges(ctx context.Context, repo *repo_model.Repository, revisio
         changes.Updates = append(changes.Updates, updates...)
         return nil
     }
-    lines := strings.Split(stdout, "\n")
-    for _, line := range lines {
+    lines := strings.SplitSeq(stdout, "\n")
+    for line := range lines {
         line = strings.TrimSpace(line)
         if len(line) == 0 {
             continue

@@ -8,6 +8,7 @@ import (
     "fmt"
     "net/url"
     "regexp"
+    "slices"
     "strconv"
     "strings"

@@ -447,12 +448,7 @@ func (o *valuedOption) IsChecked() bool {
     case api.IssueFormFieldTypeDropdown:
         checks := strings.Split(o.field.Get(fmt.Sprintf("form-field-%s", o.field.ID)), ",")
         idx := strconv.Itoa(o.index)
-        for _, v := range checks {
-            if v == idx {
-                return true
-            }
-        }
-        return false
+        return slices.Contains(checks, idx)
     case api.IssueFormFieldTypeCheckboxes:
         return o.field.Get(fmt.Sprintf("form-field-%s-%d", o.field.ID, o.index)) == "on"
     }

@@ -72,7 +72,7 @@ func parseYamlFormat(fileName string, data []byte) ([]*Label, error) {
 func parseLegacyFormat(fileName string, data []byte) ([]*Label, error) {
     lines := strings.Split(string(data), "\n")
     list := make([]*Label, 0, len(lines))
-    for i := 0; i < len(lines); i++ {
+    for i := range lines {
         line := strings.TrimSpace(lines[i])
         if len(line) == 0 {
             continue

@@ -108,7 +108,7 @@ func LoadTemplateDescription(fileName string) (string, error) {
         return "", err
     }

-    for i := 0; i < len(list); i++ {
+    for i := range list {
         if i > 0 {
             buf.WriteString(", ")
         }

@@ -208,7 +208,7 @@ func EventFormatTextMessage(mode *WriterMode, event *Event, msgFormat string, ms
             }
         }
         if hasColorValue {
-            msg = []byte(fmt.Sprintf(msgFormat, msgArgs...))
+            msg = fmt.Appendf(nil, msgFormat, msgArgs...)
         }
     }
     // try to reuse the pre-formatted simple text message

@@ -227,8 +227,8 @@ func EventFormatTextMessage(mode *WriterMode, event *Event, msgFormat string, ms
     buf = append(buf, msg...)

     if event.Stacktrace != "" && mode.StacktraceLevel <= event.Level {
-        lines := bytes.Split([]byte(event.Stacktrace), []byte("\n"))
-        for _, line := range lines {
+        lines := bytes.SplitSeq([]byte(event.Stacktrace), []byte("\n"))
+        for line := range lines {
             buf = append(buf, "\n\t"...)
             buf = append(buf, line...)
         }

@@ -63,11 +63,9 @@ func TestConnLogger(t *testing.T) {
     }
     expected := fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.Filename, event.Line, event.Caller, strings.ToUpper(event.Level.String())[0], event.MsgSimpleText)
     var wg sync.WaitGroup
-    wg.Add(1)
-    go func() {
-        defer wg.Done()
+    wg.Go(func() {
         listenReadAndClose(t, l, expected)
-    }()
+    })
     logger.SendLogEvent(&event)
     wg.Wait()
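sync.WaitGroup gained a Go method (Go 1.25+) that increments the counter, runs the function in a new goroutine, and calls Done when it returns, so the Add(1)/go/defer Done boilerplate collapses into a single call. A minimal sketch:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        var wg sync.WaitGroup
        for i := range 3 {
            wg.Go(func() { // wraps Add(1), the goroutine launch, and Done()
                fmt.Println("worker", i)
            })
        }
        wg.Wait()
    }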
@@ -124,7 +124,7 @@ func FlagsFromString(from string, def ...uint32) Flags {
         return Flags{defined: true, flags: def[0]}
     }
     flags := uint32(0)
-    for _, flag := range strings.Split(strings.ToLower(from), ",") {
+    for flag := range strings.SplitSeq(strings.ToLower(from), ",") {
         flags |= flagFromString[strings.TrimSpace(flag)]
     }
     return Flags{defined: true, flags: flags}

@@ -33,11 +33,11 @@ func TestLevelMarshalUnmarshalJSON(t *testing.T) {
     require.NoError(t, err)
     assert.Equal(t, INFO, testLevel.Level)

-    err = json.Unmarshal([]byte(fmt.Sprintf(`{"level":%d}`, 2)), &testLevel)
+    err = json.Unmarshal(fmt.Appendf(nil, `{"level":%d}`, 2), &testLevel)
     require.NoError(t, err)
     assert.Equal(t, INFO, testLevel.Level)

-    err = json.Unmarshal([]byte(fmt.Sprintf(`{"level":%d}`, 10012)), &testLevel)
+    err = json.Unmarshal(fmt.Appendf(nil, `{"level":%d}`, 10012), &testLevel)
     require.NoError(t, err)
     assert.Equal(t, INFO, testLevel.Level)

@@ -52,5 +52,5 @@ func TestLevelMarshalUnmarshalJSON(t *testing.T) {
 }

 func makeTestLevelBytes(level string) []byte {
-    return []byte(fmt.Sprintf(`{"level":"%s"}`, level))
+    return fmt.Appendf(nil, `{"level":"%s"}`, level)
 }

@@ -80,8 +80,8 @@ func newFilePreview(ctx *RenderContext, node *html.Node, locale translation.Loca
     filePath := node.Data[m[6]:m[7]]
     hash := node.Data[m[8]:m[9]]
     urlFullSource := urlFull
-    if strings.HasSuffix(filePath, "?display=source") {
-        filePath = strings.TrimSuffix(filePath, "?display=source")
+    if before, ok := strings.CutSuffix(filePath, "?display=source"); ok {
+        filePath = before
     } else if Type(filePath) != "" {
         urlFullSource = node.Data[m[0]:m[6]] + filePath + "?display=source#" + hash
     }

@@ -11,6 +11,7 @@ import (
     "path"
     "path/filepath"
     "regexp"
+    "slices"
     "strings"
     "sync"

@@ -124,13 +125,7 @@ func CustomLinkURLSchemes(schemes []string) {
         if !validScheme.MatchString(s) {
             continue
         }
-        without := false
-        for _, sna := range xurls.SchemesNoAuthority {
-            if s == sna {
-                without = true
-                break
-            }
-        }
+        without := slices.Contains(xurls.SchemesNoAuthority, s)
         if without {
             s += ":"
         } else {

@@ -675,9 +670,9 @@ func shortLinkProcessor(ctx *RenderContext, node *html.Node) {
     // It makes page handling terrible, but we prefer GitHub syntax
     // And fall back to MediaWiki only when it is obvious from the look
     // Of text and link contents
-    sl := strings.Split(content, "|")
-    for _, v := range sl {
-        if equalPos := strings.IndexByte(v, '='); equalPos == -1 {
+    sl := strings.SplitSeq(content, "|")
+    for v := range sl {
+        if found := strings.Contains(v, "="); !found {
             // There is no equal in this argument; this is a mandatory arg
             if props["name"] == "" {
                 if IsLinkStr(v) {

@@ -699,8 +694,8 @@ func shortLinkProcessor(ctx *RenderContext, node *html.Node) {
         } else {
             // There is an equal; optional argument.

-            sep := strings.IndexByte(v, '=')
-            key, val := v[:sep], html.UnescapeString(v[sep+1:])
+            before, after, _ := strings.Cut(v, "=")
+            key, val := before, html.UnescapeString(after)

             // When parsing HTML, x/net/html will change all quotes which are
             // not used for syntax into UTF-8 quotes. So checking val[0] won't

@@ -1148,7 +1143,7 @@ func comparePatternProcessor(ctx *RenderContext, node *html.Node) {
     }

     // Ensure that every group (m[0]...m[9]) has a match
-    for i := 0; i < 10; i++ {
+    for i := range 10 {
         if m[i] == -1 {
             return
         }

@@ -182,10 +182,7 @@ func actualRender(ctx *markup.RenderContext, input io.Reader, output io.Writer)
     }
     buf, _ = ExtractMetadataBytes(buf, rc)

-    metaLength := bufWithMetadataLength - len(buf)
-    if metaLength < 0 {
-        metaLength = 0
-    }
+    metaLength := max(bufWithMetadataLength-len(buf), 0)
     rc.metaLength = metaLength

     pc.Set(markdownutil.RenderConfigKey, rc)
@@ -319,7 +319,7 @@ func TestTotal_RenderWiki(t *testing.T) {

     answers := testAnswers(util.URLJoin(FullURL, "wiki"), util.URLJoin(FullURL, "wiki", "raw"))

-    for i := 0; i < len(sameCases); i++ {
+    for i := range sameCases {
         line, err := markdown.RenderString(&markup.RenderContext{
             Ctx: git.DefaultContext,
             Links: markup.Links{

@@ -363,7 +363,7 @@ func TestTotal_RenderString(t *testing.T) {

     answers := testAnswers(util.URLJoin(FullURL, "src", "master"), util.URLJoin(FullURL, "media", "master"))

-    for i := 0; i < len(sameCases); i++ {
+    for i := range sameCases {
         line, err := markdown.RenderString(&markup.RenderContext{
             Ctx: git.DefaultContext,
             Links: markup.Links{

@@ -24,7 +24,7 @@ func (r *BlockRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {

 func (r *BlockRenderer) writeLines(w util.BufWriter, source []byte, n gast.Node) {
     l := n.Lines().Len()
-    for i := 0; i < l; i++ {
+    for i := range l {
         line := n.Lines().At(i)
         _, _ = w.Write(util.EscapeHTML(line.Value(source)))
     }

@@ -63,7 +63,7 @@ func TestExtractMetadata(t *testing.T) {
 func TestExtractMetadataBytes(t *testing.T) {
     t.Run("ValidFrontAndBody", func(t *testing.T) {
         var meta IssueTemplate
-        body, err := ExtractMetadataBytes([]byte(fmt.Sprintf("%s\n%s\n%s\n%s", sepTest, frontTest, sepTest, bodyTest)), &meta)
+        body, err := ExtractMetadataBytes(fmt.Appendf(nil, "%s\n%s\n%s\n%s", sepTest, frontTest, sepTest, bodyTest), &meta)
         require.NoError(t, err)
         assert.Equal(t, bodyTest, string(body))
         assert.Equal(t, metaTest, meta)

@@ -72,19 +72,19 @@ func TestExtractMetadataBytes(t *testing.T) {

     t.Run("NoFirstSeparator", func(t *testing.T) {
         var meta IssueTemplate
-        _, err := ExtractMetadataBytes([]byte(fmt.Sprintf("%s\n%s\n%s", frontTest, sepTest, bodyTest)), &meta)
+        _, err := ExtractMetadataBytes(fmt.Appendf(nil, "%s\n%s\n%s", frontTest, sepTest, bodyTest), &meta)
         require.Error(t, err)
     })

     t.Run("NoLastSeparator", func(t *testing.T) {
         var meta IssueTemplate
-        _, err := ExtractMetadataBytes([]byte(fmt.Sprintf("%s\n%s\n%s", sepTest, frontTest, bodyTest)), &meta)
+        _, err := ExtractMetadataBytes(fmt.Appendf(nil, "%s\n%s\n%s", sepTest, frontTest, bodyTest), &meta)
         require.Error(t, err)
     })

     t.Run("NoBody", func(t *testing.T) {
         var meta IssueTemplate
-        body, err := ExtractMetadataBytes([]byte(fmt.Sprintf("%s\n%s\n%s", sepTest, frontTest, sepTest)), &meta)
+        body, err := ExtractMetadataBytes(fmt.Appendf(nil, "%s\n%s\n%s", sepTest, frontTest, sepTest), &meta)
         require.NoError(t, err)
         assert.Empty(t, string(body))
         assert.Equal(t, metaTest, meta)

@@ -44,7 +44,7 @@ func createTOCNode(toc []markup.Header, lang string, detailsAttrs map[string]str
         }
         li := ast.NewListItem(currentLevel * 2)
         a := ast.NewLink()
-        a.Destination = []byte(fmt.Sprintf("#%s", url.QueryEscape(header.ID)))
+        a.Destination = fmt.Appendf(nil, "#%s", url.QueryEscape(header.ID))
         a.AppendChild(a, ast.NewString([]byte(header.Text)))
         li.AppendChild(li, a)
         ul.AppendChild(ul, li)

@@ -17,7 +17,7 @@ import (
 func (g *ASTTransformer) transformHeading(_ *markup.RenderContext, v *ast.Heading, reader text.Reader, tocList *[]markup.Header) {
     for _, attr := range v.Attributes() {
         if _, ok := attr.Value.([]byte); !ok {
-            v.SetAttribute(attr.Name, []byte(fmt.Sprintf("%v", attr.Value)))
+            v.SetAttribute(attr.Name, fmt.Appendf(nil, "%v", attr.Value))
         }
     }
     txt := mdutil.Text(v, reader.Source())

@@ -319,23 +319,19 @@ func render(ctx *RenderContext, renderer Renderer, input io.Reader, output io.Wr
             _ = pw2.Close()
         }()

-        wg.Add(1)
-        go func() {
+        wg.Go(func() {
             err = donotpanic.SafeFuncWithError(func() error { return SanitizeReader(pr2, renderer.Name(), output) })
             _ = pr2.Close()
-            wg.Done()
-        }()
+        })
     } else {
         pw2 = nopCloser{output}
     }

-    wg.Add(1)
-    go func() {
+    wg.Go(func() {
         err = donotpanic.SafeFuncWithError(func() error { return postProcessOrCopy(ctx, renderer, pr, pw2) })
         _ = pr.Close()
         _ = pw2.Close()
-        wg.Done()
-    }()
+    })

     if err1 := renderer.Render(ctx, input, pw); err1 != nil {
         return err1
@@ -58,7 +58,7 @@ type PackageMetadata struct {
     Time map[string]time.Time `json:"time,omitempty"`
     Homepage string `json:"homepage,omitempty"`
     Keywords []string `json:"keywords,omitempty"`
-    Repository Repository `json:"repository,omitempty"`
+    Repository Repository `json:"repository"`
     Author User `json:"author"`
     ReadmeFilename string `json:"readmeFilename,omitempty"`
     Users map[string]bool `json:"users,omitempty"`

@@ -75,7 +75,7 @@ type PackageMetadataVersion struct {
     Author User `json:"author"`
     Homepage string `json:"homepage,omitempty"`
     License string `json:"license,omitempty"`
-    Repository Repository `json:"repository,omitempty"`
+    Repository Repository `json:"repository"`
     Keywords []string `json:"keywords,omitempty"`
     Dependencies map[string]string `json:"dependencies,omitempty"`
     BundleDependencies []string `json:"bundleDependencies,omitempty"`

@@ -22,5 +22,5 @@ type Metadata struct {
     OptionalDependencies map[string]string `json:"optional_dependencies,omitempty"`
     Bin map[string]string `json:"bin,omitempty"`
     Readme string `json:"readme,omitempty"`
-    Repository Repository `json:"repository,omitempty"`
+    Repository Repository `json:"repository"`
 }

@@ -142,8 +142,8 @@ func ParseDebugHeaderID(r io.ReadSeeker) (string, error) {
         if _, err := r.Read(b); err != nil {
             return "", err
         }
-        if i := bytes.IndexByte(b, 0); i != -1 {
-            buf.Write(b[:i])
+        if before, _, ok := bytes.Cut(b, []byte{0}); ok {
+            buf.Write(before)
             return buf.String(), nil
         }
         buf.Write(b)

@@ -91,7 +91,7 @@ func (e *MarshalEncoder) marshal(v any) error {
     val := reflect.ValueOf(v)
     typ := reflect.TypeOf(v)

-    if typ.Kind() == reflect.Ptr {
+    if typ.Kind() == reflect.Pointer {
         val = val.Elem()
         typ = typ.Elem()
     }

@@ -250,7 +250,7 @@ func (e *MarshalEncoder) marshalArray(arr reflect.Value) error {
         return err
     }

-    for i := 0; i < length; i++ {
+    for i := range length {
         if err := e.marshal(arr.Index(i).Interface()); err != nil {
             return err
         }

@@ -47,7 +47,7 @@ type Metadata struct {
     Keywords []string `json:"keywords,omitempty"`
     RepositoryURL string `json:"repository_url,omitempty"`
     License string `json:"license,omitempty"`
-    Author Person `json:"author,omitempty"`
+    Author Person `json:"author"`
     Manifests map[string]*Manifest `json:"manifests,omitempty"`
 }

@@ -7,6 +7,7 @@ import (
     "context"
     "fmt"
     "net/url"
+    "strings"

     asymkey_model "forgejo.org/models/asymkey"
     "forgejo.org/models/perm"

@@ -47,17 +48,18 @@ type ServCommandResults struct {

 // ServCommand preps for a serv call
 func ServCommand(ctx context.Context, keyID int64, ownerName, repoName string, mode perm.AccessMode, verbs ...string) (*ServCommandResults, ResponseExtra) {
-    reqURL := setting.LocalURL + fmt.Sprintf("api/internal/serv/command/%d/%s/%s?mode=%d",
+    var reqURL strings.Builder
+    fmt.Fprintf(&reqURL, "%sapi/internal/serv/command/%d/%s/%s?mode=%d",
+        setting.LocalURL,
         keyID,
         url.PathEscape(ownerName),
         url.PathEscape(repoName),
-        mode,
-    )
+        mode)
     for _, verb := range verbs {
         if verb != "" {
-            reqURL += fmt.Sprintf("&verb=%s", url.QueryEscape(verb))
+            fmt.Fprintf(&reqURL, "&verb=%s", url.QueryEscape(verb))
         }
     }
-    req := newInternalRequest(ctx, reqURL, "GET")
+    req := newInternalRequest(ctx, reqURL.String(), "GET")
     return requestJSONResp(req, &ServCommandResults{})
 }
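Because strings.Builder implements io.Writer, fmt.Fprintf can format straight into it; the request URL above is therefore assembled without repeated string concatenation. A hedged sketch with placeholder values only:

    package main

    import (
        "fmt"
        "net/url"
        "strings"
    )

    func main() {
        var reqURL strings.Builder
        fmt.Fprintf(&reqURL, "%sapi/internal/serv/command/%d?mode=%d",
            "http://localhost:3000/", 42, 1) // illustrative base URL, key ID and mode
        for _, verb := range []string{"git-upload-pack", ""} {
            if verb != "" {
                fmt.Fprintf(&reqURL, "&verb=%s", url.QueryEscape(verb))
            }
        }
        fmt.Println(reqURL.String())
    }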
@@ -45,7 +45,7 @@ func FileHandlerFunc() http.HandlerFunc {
 func parseAcceptEncoding(val string) container.Set[string] {
     parts := strings.Split(val, ";")
     types := make(container.Set[string])
-    for _, v := range strings.Split(parts[0], ",") {
+    for v := range strings.SplitSeq(parts[0], ",") {
         types.Add(strings.TrimSpace(v))
     }
     return types

@@ -83,7 +83,7 @@ func prepareLevelDB(cfg *BaseConfig) (conn string, db *leveldb.DB, err error) {
         }
         conn = cfg.ConnStr
     }
-    for i := 0; i < 10; i++ {
+    for range 10 {
         if db, err = nosql.GetManager().GetLevelDB(conn); err == nil {
             break
         }

@@ -49,7 +49,7 @@ func newBaseRedisGeneric(cfg *BaseConfig, unique bool, client nosql.RedisClient)
     }

     var err error
-    for i := 0; i < 10; i++ {
+    for range 10 {
         err = client.Ping(graceful.GetManager().ShutdownContext()).Err()
         if err == nil {
             break
Some files were not shown because too many files have changed in this diff.