// Copyright 2017 Gitea. All rights reserved.
// SPDX-License-Identifier: MIT

package git

import (
	"context"
	"crypto/sha1"
	"errors"
	"fmt"
	"net/url"
	"strconv"
	"strings"
	"time"

	asymkey_model "code.gitea.io/gitea/models/asymkey"
	"code.gitea.io/gitea/models/db"
	repo_model "code.gitea.io/gitea/models/repo"
	user_model "code.gitea.io/gitea/models/user"
	"code.gitea.io/gitea/modules/git"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/setting"
	api "code.gitea.io/gitea/modules/structs"
	"code.gitea.io/gitea/modules/timeutil"
	"code.gitea.io/gitea/modules/translation"

	"xorm.io/builder"
)

// CommitStatus holds a single Status of a single Commit
type CommitStatus struct {
	ID          int64                  `xorm:"pk autoincr"`
	Index       int64                  `xorm:"INDEX UNIQUE(repo_sha_index)"`
	RepoID      int64                  `xorm:"INDEX UNIQUE(repo_sha_index)"`
	Repo        *repo_model.Repository `xorm:"-"`
	State       api.CommitStatusState  `xorm:"VARCHAR(7) NOT NULL"`
	SHA         string                 `xorm:"VARCHAR(64) NOT NULL INDEX UNIQUE(repo_sha_index)"`
	TargetURL   string                 `xorm:"TEXT"`
	Description string                 `xorm:"TEXT"`
	ContextHash string                 `xorm:"VARCHAR(64) index"`
	Context     string                 `xorm:"TEXT"`
	Creator     *user_model.User       `xorm:"-"`
	CreatorID   int64

	CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
	UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
}

func init() {
	db.RegisterModel(new(CommitStatus))
	db.RegisterModel(new(CommitStatusIndex))
}
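
// Each supported database needs its own upsert syntax to atomically bump
// max_index for a (repo_id, sha) pair, so the helpers below are
// database-specific; other databases (e.g. SQLite) fall back to the generic
// update-then-insert path in GetNextCommitStatusIndex.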

func postgresGetCommitStatusIndex(ctx context.Context, repoID int64, sha string) (int64, error) {
	res, err := db.GetEngine(ctx).Query("INSERT INTO `commit_status_index` (repo_id, sha, max_index) "+
		"VALUES (?,?,1) ON CONFLICT (repo_id, sha) DO UPDATE SET max_index = `commit_status_index`.max_index+1 RETURNING max_index",
		repoID, sha)
	if err != nil {
		return 0, err
	}
	if len(res) == 0 {
		return 0, db.ErrGetResourceIndexFailed
	}
	return strconv.ParseInt(string(res[0]["max_index"]), 10, 64)
}
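
// Unlike the PostgreSQL variant, the MySQL upsert below does not return the
// updated value in the same statement, so a second SELECT reads back the new
// max_index.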

func mysqlGetCommitStatusIndex(ctx context.Context, repoID int64, sha string) (int64, error) {
	if _, err := db.GetEngine(ctx).Exec("INSERT INTO `commit_status_index` (repo_id, sha, max_index) "+
		"VALUES (?,?,1) ON DUPLICATE KEY UPDATE max_index = max_index+1",
		repoID, sha); err != nil {
		return 0, err
	}

	var idx int64
	_, err := db.GetEngine(ctx).SQL("SELECT max_index FROM `commit_status_index` WHERE repo_id = ? AND sha = ?",
		repoID, sha).Get(&idx)
	if err != nil {
		return 0, err
	}
	if idx == 0 {
		return 0, errors.New("cannot get the correct index")
	}
	return idx, nil
}
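
// The MSSQL variant uses MERGE ... WITH (HOLDLOCK) to perform the upsert
// atomically; like the MySQL variant it then re-reads max_index with a
// separate SELECT.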

func mssqlGetCommitStatusIndex(ctx context.Context, repoID int64, sha string) (int64, error) {
	if _, err := db.GetEngine(ctx).Exec(`
MERGE INTO commit_status_index WITH (HOLDLOCK) AS target
USING (SELECT ? AS repo_id, ? AS sha) AS source
(repo_id, sha)
ON target.repo_id = source.repo_id AND target.sha = source.sha
WHEN MATCHED
	THEN UPDATE
		SET max_index = max_index + 1
WHEN NOT MATCHED
	THEN INSERT (repo_id, sha, max_index)
		VALUES (?, ?, 1);
`, repoID, sha, repoID, sha); err != nil {
		return 0, err
	}

	var idx int64
	_, err := db.GetEngine(ctx).SQL("SELECT max_index FROM `commit_status_index` WHERE repo_id = ? AND sha = ?",
		repoID, sha).Get(&idx)
	if err != nil {
		return 0, err
	}
	if idx == 0 {
		return 0, errors.New("cannot get the correct index")
	}
	return idx, nil
}

// GetNextCommitStatusIndex generates the next commit status index for the given repository and commit SHA
func GetNextCommitStatusIndex(ctx context.Context, repoID int64, sha string) (int64, error) {
	_, err := git.NewIDFromString(sha)
	if err != nil {
		return 0, git.ErrInvalidSHA{SHA: sha}
	}

	switch {
	case setting.Database.Type.IsPostgreSQL():
		return postgresGetCommitStatusIndex(ctx, repoID, sha)
	case setting.Database.Type.IsMySQL():
		return mysqlGetCommitStatusIndex(ctx, repoID, sha)
	case setting.Database.Type.IsMSSQL():
		return mssqlGetCommitStatusIndex(ctx, repoID, sha)
	}

	e := db.GetEngine(ctx)

	// try to update max_index to the next value, and acquire the write-lock for the record
	res, err := e.Exec("UPDATE `commit_status_index` SET max_index=max_index+1 WHERE repo_id=? AND sha=?", repoID, sha)
	if err != nil {
		return 0, fmt.Errorf("update failed: %w", err)
	}
	affected, err := res.RowsAffected()
	if err != nil {
		return 0, err
	}
	if affected == 0 {
		// this slow path is only taken the first time a resource index is created
		_, errIns := e.Exec("INSERT INTO `commit_status_index` (repo_id, sha, max_index) VALUES (?, ?, 0)", repoID, sha)
		res, err = e.Exec("UPDATE `commit_status_index` SET max_index=max_index+1 WHERE repo_id=? AND sha=?", repoID, sha)
		if err != nil {
			return 0, fmt.Errorf("update2 failed: %w", err)
		}
		affected, err = res.RowsAffected()
		if err != nil {
			return 0, fmt.Errorf("RowsAffected failed: %w", err)
		}
		// if the update still cannot update any record, the record must not exist and there must have been an error (insert error)
		if affected == 0 {
			if errIns == nil {
				return 0, errors.New("impossible error when GetNextCommitStatusIndex, insert and update both succeeded but no record is updated")
			}
			return 0, fmt.Errorf("insert failed: %w", errIns)
		}
	}

	// now, the new index is in the database (protected by the transaction and write-lock)
	var newIdx int64
	has, err := e.SQL("SELECT max_index FROM `commit_status_index` WHERE repo_id=? AND sha=?", repoID, sha).Get(&newIdx)
	if err != nil {
		return 0, fmt.Errorf("select failed: %w", err)
	}
	if !has {
		return 0, errors.New("impossible error when GetNextCommitStatusIndex, upsert succeeded but no record can be selected")
	}
	return newIdx, nil
}

func (status *CommitStatus) loadAttributes(ctx context.Context) (err error) {
	if status.Repo == nil {
		status.Repo, err = repo_model.GetRepositoryByID(ctx, status.RepoID)
		if err != nil {
			return fmt.Errorf("getRepositoryByID [%d]: %w", status.RepoID, err)
		}
	}
	if status.Creator == nil && status.CreatorID > 0 {
		status.Creator, err = user_model.GetUserByID(ctx, status.CreatorID)
		if err != nil {
			return fmt.Errorf("getUserByID [%d]: %w", status.CreatorID, err)
		}
	}
	return nil
}

// APIURL returns the absolute API URL to this commit status.
func (status *CommitStatus) APIURL(ctx context.Context) string {
	_ = status.loadAttributes(ctx)
	return status.Repo.APIURL() + "/statuses/" + url.PathEscape(status.SHA)
}

// LocaleString returns the localized string name of the Status
func (status *CommitStatus) LocaleString(lang translation.Locale) string {
	return lang.TrString("repo.commitstatus." + status.State.String())
}
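
// Starting from success and only replacing the state when the next status is
// NoBetterThan the current one means the loop below effectively keeps the most
// severe state seen (for example, a single failure outweighs any number of
// successes).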

// CalcCommitStatus returns the overall commit status derived from a list of statuses; the statuses should be ordered by id desc
func CalcCommitStatus(statuses []*CommitStatus) *CommitStatus {
	var lastStatus *CommitStatus
	state := api.CommitStatusSuccess
	for _, status := range statuses {
		if status.State.NoBetterThan(state) {
			state = status.State
			lastStatus = status
		}
	}
	if lastStatus == nil {
		if len(statuses) > 0 {
			lastStatus = statuses[0]
		} else {
			lastStatus = &CommitStatus{}
		}
	}
	return lastStatus
}

// CommitStatusOptions holds the options for querying commit statuses
type CommitStatusOptions struct {
	db.ListOptions
	RepoID   int64
	SHA      string
	State    string
	SortType string
}

func (opts *CommitStatusOptions) ToConds() builder.Cond {
	var cond builder.Cond = builder.Eq{
		"repo_id": opts.RepoID,
		"sha":     opts.SHA,
	}

	switch opts.State {
	case "pending", "success", "error", "failure", "warning":
		cond = cond.And(builder.Eq{
			"state": opts.State,
		})
	}

	return cond
}

func (opts *CommitStatusOptions) ToOrders() string {
	switch opts.SortType {
	case "oldest":
		return "created_unix ASC"
	case "recentupdate":
		return "updated_unix DESC"
	case "leastupdate":
		return "updated_unix ASC"
	case "leastindex":
		return "`index` DESC"
	case "highestindex":
		return "`index` ASC"
	default:
		return "created_unix DESC"
	}
}

// CommitStatusIndex represents a table for commit status index
type CommitStatusIndex struct {
	ID       int64
	RepoID   int64  `xorm:"unique(repo_sha)"`
	SHA      string `xorm:"unique(repo_sha)"`
	MaxIndex int64  `xorm:"index"`
}
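
// The query below selects only the newest status (max id) for every distinct
// context_hash, so each CI context contributes at most one row to the result.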

// GetLatestCommitStatus returns all statuses with a unique context for a given commit.
func GetLatestCommitStatus(ctx context.Context, repoID int64, sha string, listOptions db.ListOptions) ([]*CommitStatus, int64, error) {
	ids := make([]int64, 0, 10)
	sess := db.GetEngine(ctx).Table(&CommitStatus{}).
		Where("repo_id = ?", repoID).And("sha = ?", sha).
		Select("max( id ) as id").
		GroupBy("context_hash").OrderBy("max( id ) desc")
	if !listOptions.IsListAll() {
		sess = db.SetSessionPagination(sess, &listOptions)
	}
	count, err := sess.FindAndCount(&ids)
	if err != nil {
		return nil, count, err
	}
	statuses := make([]*CommitStatus, 0, len(ids))
	if len(ids) == 0 {
		return statuses, count, nil
	}
	return statuses, count, db.GetEngine(ctx).In("id", ids).Find(&statuses)
}

// GetLatestCommitStatusForPairs returns all statuses with a unique context for a given list of repo-sha pairs
func GetLatestCommitStatusForPairs(ctx context.Context, repoIDsToLatestCommitSHAs map[int64]string, listOptions db.ListOptions) (map[int64][]*CommitStatus, error) {
	type result struct {
		ID     int64
		RepoID int64
	}

	results := make([]result, 0, len(repoIDsToLatestCommitSHAs))

	sess := db.GetEngine(ctx).Table(&CommitStatus{})

	// Create a disjunction of conditions for each repoID and SHA pair
	conds := make([]builder.Cond, 0, len(repoIDsToLatestCommitSHAs))
	for repoID, sha := range repoIDsToLatestCommitSHAs {
		conds = append(conds, builder.Eq{"repo_id": repoID, "sha": sha})
	}
	sess = sess.Where(builder.Or(conds...)).
		Select("max( id ) as id, repo_id").
		GroupBy("context_hash, repo_id").OrderBy("max( id ) desc")

	if !listOptions.IsListAll() {
		sess = db.SetSessionPagination(sess, &listOptions)
	}

	err := sess.Find(&results)
	if err != nil {
		return nil, err
	}

	ids := make([]int64, 0, len(results))
	repoStatuses := make(map[int64][]*CommitStatus)
	for _, result := range results {
		ids = append(ids, result.ID)
	}

	statuses := make([]*CommitStatus, 0, len(ids))
	if len(ids) > 0 {
		err = db.GetEngine(ctx).In("id", ids).Find(&statuses)
		if err != nil {
			return nil, err
		}

		// Group the statuses by repo ID
		for _, status := range statuses {
			repoStatuses[status.RepoID] = append(repoStatuses[status.RepoID], status)
		}
	}

	return repoStatuses, nil
}

// GetLatestCommitStatusForRepoCommitIDs returns all statuses with a unique context for a given repository and list of commit IDs
func GetLatestCommitStatusForRepoCommitIDs(ctx context.Context, repoID int64, commitIDs []string) (map[string][]*CommitStatus, error) {
	type result struct {
		ID  int64
		Sha string
	}

	results := make([]result, 0, len(commitIDs))

	sess := db.GetEngine(ctx).Table(&CommitStatus{})

	// Create a disjunction of conditions for each commit SHA
	conds := make([]builder.Cond, 0, len(commitIDs))
	for _, sha := range commitIDs {
		conds = append(conds, builder.Eq{"sha": sha})
	}
	sess = sess.Where(builder.Eq{"repo_id": repoID}.And(builder.Or(conds...))).
		Select("max( id ) as id, sha").
		GroupBy("context_hash, sha").OrderBy("max( id ) desc")

	err := sess.Find(&results)
	if err != nil {
		return nil, err
	}

	ids := make([]int64, 0, len(results))
	repoStatuses := make(map[string][]*CommitStatus)
	for _, result := range results {
		ids = append(ids, result.ID)
	}

	statuses := make([]*CommitStatus, 0, len(ids))
	if len(ids) > 0 {
		err = db.GetEngine(ctx).In("id", ids).Find(&statuses)
		if err != nil {
			return nil, err
		}

		// Group the statuses by commit SHA
		for _, status := range statuses {
			repoStatuses[status.SHA] = append(repoStatuses[status.SHA], status)
		}
	}

	return repoStatuses, nil
}

// FindRepoRecentCommitStatusContexts returns a repository's recent commit status contexts
func FindRepoRecentCommitStatusContexts(ctx context.Context, repoID int64, before time.Duration) ([]string, error) {
	start := timeutil.TimeStampNow().AddDuration(-before)
	ids := make([]int64, 0, 10)
	if err := db.GetEngine(ctx).Table("commit_status").
		Where("repo_id = ?", repoID).
		And("updated_unix >= ?", start).
		Select("max( id ) as id").
		GroupBy("context_hash").OrderBy("max( id ) desc").
		Find(&ids); err != nil {
		return nil, err
	}

	contexts := make([]string, 0, len(ids))
	if len(ids) == 0 {
		return contexts, nil
	}
	return contexts, db.GetEngine(ctx).Select("context").Table("commit_status").In("id", ids).Find(&contexts)
}

// NewCommitStatusOptions holds options for creating a CommitStatus
type NewCommitStatusOptions struct {
	Repo         *repo_model.Repository
	Creator      *user_model.User
	SHA          git.ObjectID
	CommitStatus *CommitStatus
}
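
// A minimal usage sketch for NewCommitStatus (illustrative values only, not
// taken from a real caller):
//
//	err := NewCommitStatus(ctx, NewCommitStatusOptions{
//		Repo:    repo,
//		Creator: doer,
//		SHA:     commitID, // a git.ObjectID
//		CommitStatus: &CommitStatus{
//			State:     api.CommitStatusSuccess,
//			TargetURL: "https://ci.example.com/build/1",
//			Context:   "ci/build",
//		},
//	})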

// NewCommitStatus saves a commit status into the database
func NewCommitStatus(ctx context.Context, opts NewCommitStatusOptions) error {
	if opts.Repo == nil {
		return fmt.Errorf("NewCommitStatus[nil, %s]: no repository specified", opts.SHA)
	}

	repoPath := opts.Repo.RepoPath()
	if opts.Creator == nil {
		return fmt.Errorf("NewCommitStatus[%s, %s]: no user specified", repoPath, opts.SHA)
	}

	ctx, committer, err := db.TxContext(ctx)
	if err != nil {
		return fmt.Errorf("NewCommitStatus[repo_id: %d, user_id: %d, sha: %s]: %w", opts.Repo.ID, opts.Creator.ID, opts.SHA, err)
	}
	defer committer.Close()

	// Get the next Status Index
	idx, err := GetNextCommitStatusIndex(ctx, opts.Repo.ID, opts.SHA.String())
	if err != nil {
		return fmt.Errorf("generate commit status index failed: %w", err)
	}

	opts.CommitStatus.Description = strings.TrimSpace(opts.CommitStatus.Description)
	opts.CommitStatus.Context = strings.TrimSpace(opts.CommitStatus.Context)
	opts.CommitStatus.TargetURL = strings.TrimSpace(opts.CommitStatus.TargetURL)
	opts.CommitStatus.SHA = opts.SHA.String()
	opts.CommitStatus.CreatorID = opts.Creator.ID
	opts.CommitStatus.RepoID = opts.Repo.ID
	opts.CommitStatus.Index = idx
	log.Debug("NewCommitStatus[%s, %s]: %d", repoPath, opts.SHA, opts.CommitStatus.Index)

	opts.CommitStatus.ContextHash = hashCommitStatusContext(opts.CommitStatus.Context)

	// Insert new CommitStatus
	if _, err = db.GetEngine(ctx).Insert(opts.CommitStatus); err != nil {
		return fmt.Errorf("insert CommitStatus[%s, %s]: %w", repoPath, opts.SHA, err)
	}

	return committer.Commit()
}

// SignCommitWithStatuses represents a commit with validation of signature and status state.
type SignCommitWithStatuses struct {
	Status   *CommitStatus
	Statuses []*CommitStatus
	*asymkey_model.SignCommit
}

// ParseCommitsWithStatus checks the latest statuses of the given commits and calculates each commit's worst status state
func ParseCommitsWithStatus(ctx context.Context, oldCommits []*asymkey_model.SignCommit, repo *repo_model.Repository) []*SignCommitWithStatuses {
	newCommits := make([]*SignCommitWithStatuses, 0, len(oldCommits))

	for _, c := range oldCommits {
		commit := &SignCommitWithStatuses{
			SignCommit: c,
		}
		statuses, _, err := GetLatestCommitStatus(ctx, repo.ID, commit.ID.String(), db.ListOptions{})
		if err != nil {
			log.Error("GetLatestCommitStatus: %v", err)
		} else {
			commit.Statuses = statuses
			commit.Status = CalcCommitStatus(statuses)
		}

		newCommits = append(newCommits, commit)
	}
	return newCommits
}

// hashCommitStatusContext returns the SHA-1 hash of a commit status context as a hex string
func hashCommitStatusContext(context string) string {
	return fmt.Sprintf("%x", sha1.Sum([]byte(context)))
}

// ConvertFromGitCommit converts git commits into SignCommitWithStatuses
func ConvertFromGitCommit(ctx context.Context, commits []*git.Commit, repo *repo_model.Repository) []*SignCommitWithStatuses {
	return ParseCommitsWithStatus(ctx,
		asymkey_model.ParseCommitsWithSignature(
			ctx,
			user_model.ValidateCommitsWithEmails(ctx, commits),
			repo.GetTrustModel(),
			func(user *user_model.User) (bool, error) {
				return repo_model.IsOwnerMemberCollaborator(ctx, repo, user.ID)
			},
		),
		repo,
	)
}