// Copyright 2020 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package convert

import (
	"context"
	"fmt"
	"net/url"
	"strings"

	"code.gitea.io/gitea/models/db"
	issues_model "code.gitea.io/gitea/models/issues"
	repo_model "code.gitea.io/gitea/models/repo"
	user_model "code.gitea.io/gitea/models/user"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/setting"
	api "code.gitea.io/gitea/modules/structs"
)

// ToAPIIssue converts an Issue to API format.
// It assumes some fields are already assigned with values:
// Required - Poster, Labels
// Optional - Milestone, Assignee, PullRequest
func ToAPIIssue(ctx context.Context, issue *issues_model.Issue) *api.Issue {
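	// Load the related records; if any load fails, return an empty Issue rather than a partially converted one.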
	if err := issue.LoadLabels(ctx); err != nil {
		return &api.Issue{}
	}
	if err := issue.LoadPoster(ctx); err != nil {
		return &api.Issue{}
	}
	if err := issue.LoadRepo(ctx); err != nil {
		return &api.Issue{}
	}
	if err := issue.Repo.LoadOwner(ctx); err != nil {
		return &api.Issue{}
	}

	apiIssue := &api.Issue{
		ID:          issue.ID,
		URL:         issue.APIURL(),
		HTMLURL:     issue.HTMLURL(),
		Index:       issue.Index,
		Poster:      ToUser(ctx, issue.Poster, nil),
		Title:       issue.Title,
		Body:        issue.Content,
		Attachments: ToAttachments(issue.Attachments),
		Ref:         issue.Ref,
		Labels:      ToLabelList(issue.Labels, issue.Repo, issue.Repo.Owner),
		State:       issue.State(),
		IsLocked:    issue.IsLocked,
		Comments:    issue.NumComments,
		Created:     issue.CreatedUnix.AsTime(),
		Updated:     issue.UpdatedUnix.AsTime(),
	}

	apiIssue.Repo = &api.RepositoryMeta{
		ID:       issue.Repo.ID,
		Name:     issue.Repo.Name,
		Owner:    issue.Repo.OwnerName,
		FullName: issue.Repo.FullName(),
	}

	if issue.ClosedUnix != 0 {
		apiIssue.Closed = issue.ClosedUnix.AsTimePtr()
	}

	if err := issue.LoadMilestone(ctx); err != nil {
		return &api.Issue{}
	}
	if issue.Milestone != nil {
		apiIssue.Milestone = ToAPIMilestone(issue.Milestone)
	}

	if err := issue.LoadAssignees(ctx); err != nil {
		return &api.Issue{}
	}
	if len(issue.Assignees) > 0 {
		for _, assignee := range issue.Assignees {
			apiIssue.Assignees = append(apiIssue.Assignees, ToUser(ctx, assignee, nil))
		}
		apiIssue.Assignee = ToUser(ctx, issue.Assignees[0], nil) // For compatibility, we're keeping the first assignee as `apiIssue.Assignee`
	}
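	// For pull requests, also expose the merge state via PullRequestMeta.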
	if issue.IsPull {
		if err := issue.LoadPullRequest(ctx); err != nil {
			return &api.Issue{}
		}
		apiIssue.PullRequest = &api.PullRequestMeta{
			HasMerged: issue.PullRequest.HasMerged,
		}
		if issue.PullRequest.HasMerged {
			apiIssue.PullRequest.Merged = issue.PullRequest.MergedUnix.AsTimePtr()
		}
	}
	if issue.DeadlineUnix != 0 {
		apiIssue.Deadline = issue.DeadlineUnix.AsTimePtr()
	}

	return apiIssue
}

// ToAPIIssueList converts an IssueList to API format
func ToAPIIssueList(ctx context.Context, il issues_model.IssueList) []*api.Issue {
	result := make([]*api.Issue, len(il))
	for i := range il {
		result[i] = ToAPIIssue(ctx, il[i])
	}
	return result
}

// ToTrackedTime converts TrackedTime to API format
func ToTrackedTime(ctx context.Context, t *issues_model.TrackedTime) (apiT *api.TrackedTime) {
	apiT = &api.TrackedTime{
		ID:      t.ID,
		IssueID: t.IssueID,
		UserID:  t.UserID,
		Time:    t.Time,
		Created: t.Created,
	}
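	// The related Issue and User are attached only when they are already loaded on the TrackedTime.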
	if t.Issue != nil {
		apiT.Issue = ToAPIIssue(ctx, t.Issue)
	}
	if t.User != nil {
		apiT.UserName = t.User.Name
	}
	return apiT
}

// ToStopWatches converts a Stopwatch list to api.StopWatches
func ToStopWatches(sws []*issues_model.Stopwatch) (api.StopWatches, error) {
	result := api.StopWatches(make([]api.StopWatch, 0, len(sws)))
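
	// Per-call caches so issues and repositories shared by several stopwatches are fetched only once.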
	issueCache := make(map[int64]*issues_model.Issue)
	repoCache := make(map[int64]*repo_model.Repository)
	var (
		issue *issues_model.Issue
		repo  *repo_model.Repository
		ok    bool
		err   error
	)

	for _, sw := range sws {
		issue, ok = issueCache[sw.IssueID]
		if !ok {
			issue, err = issues_model.GetIssueByID(db.DefaultContext, sw.IssueID)
			if err != nil {
				return nil, err
			}
			issueCache[sw.IssueID] = issue
		}
		repo, ok = repoCache[issue.RepoID]
		if !ok {
			repo, err = repo_model.GetRepositoryByID(db.DefaultContext, issue.RepoID)
			if err != nil {
				return nil, err
			}
			repoCache[issue.RepoID] = repo
		}

		result = append(result, api.StopWatch{
			Created:       sw.CreatedUnix.AsTime(),
			Seconds:       sw.Seconds(),
			Duration:      sw.Duration(),
			IssueIndex:    issue.Index,
			IssueTitle:    issue.Title,
			RepoOwnerName: repo.OwnerName,
			RepoName:      repo.Name,
		})
	}
	return result, nil
}

// ToTrackedTimeList converts TrackedTimeList to API format
func ToTrackedTimeList(ctx context.Context, tl issues_model.TrackedTimeList) api.TrackedTimeList {
	result := make([]*api.TrackedTime, 0, len(tl))
	for _, t := range tl {
		result = append(result, ToTrackedTime(ctx, t))
	}
	return result
}

// ToLabel converts Label to API format
func ToLabel(label *issues_model.Label, repo *repo_model.Repository, org *user_model.User) *api.Label {
	result := &api.Label{
		ID:          label.ID,
		Name:        label.Name,
		Exclusive:   label.Exclusive,
		Color:       strings.TrimLeft(label.Color, "#"),
		Description: label.Description,
	}

	// calculate URL: repository labels live under the repository API, organization labels under the org API
	if label.BelongsToRepo() {
		if repo != nil {
			result.URL = fmt.Sprintf("%s/labels/%d", repo.APIURL(), label.ID)
		} else {
			log.Error("ToLabel did not get repo to calculate url for label with id '%d'", label.ID)
		}
	} else { // BelongsToOrg
		if org != nil {
			result.URL = fmt.Sprintf("%sapi/v1/orgs/%s/labels/%d", setting.AppURL, url.PathEscape(org.Name), label.ID)
		} else {
			log.Error("ToLabel did not get org to calculate url for label with id '%d'", label.ID)
		}
	}

	return result
}

// ToLabelList converts list of Label to API format
func ToLabelList(labels []*issues_model.Label, repo *repo_model.Repository, org *user_model.User) []*api.Label {
	result := make([]*api.Label, len(labels))
	for i := range labels {
		result[i] = ToLabel(labels[i], repo, org)
	}
	return result
}

// ToAPIMilestone converts Milestone into API Format
func ToAPIMilestone(m *issues_model.Milestone) *api.Milestone {
	apiMilestone := &api.Milestone{
		ID:           m.ID,
		State:        m.State(),
		Title:        m.Name,
		Description:  m.Content,
		OpenIssues:   m.NumOpenIssues,
		ClosedIssues: m.NumClosedIssues,
		Created:      m.CreatedUnix.AsTime(),
		Updated:      m.UpdatedUnix.AsTimePtr(),
	}
	if m.IsClosed {
		apiMilestone.Closed = m.ClosedDateUnix.AsTimePtr()
	}
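	// A deadline in year 9999 or later is treated as "not set", so only earlier dates are exposed.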
	if m.DeadlineUnix.Year() < 9999 {
		apiMilestone.Deadline = m.DeadlineUnix.AsTimePtr()
	}
	return apiMilestone
}