actions view: move loading of task attributes etc... into own func (#31494)

just a small refactor to make the function length smaller ... and code
more reusable in the future
pull/33472/head^2
6543 3 weeks ago committed by GitHub
parent 9cd88ef8c7
commit fcfe1fb0fc
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 148
      routers/web/repo/actions/view.go

@ -281,84 +281,98 @@ func ViewPost(ctx *context_module.Context) {
resp.State.CurrentJob.Steps = make([]*ViewJobStep, 0) // marshal to '[]' instead fo 'null' in json resp.State.CurrentJob.Steps = make([]*ViewJobStep, 0) // marshal to '[]' instead fo 'null' in json
resp.Logs.StepsLog = make([]*ViewStepLog, 0) // marshal to '[]' instead fo 'null' in json resp.Logs.StepsLog = make([]*ViewStepLog, 0) // marshal to '[]' instead fo 'null' in json
if task != nil { if task != nil {
steps := actions.FullSteps(task) steps, logs, err := convertToViewModel(ctx, req.LogCursors, task)
if err != nil {
for _, v := range steps { ctx.Error(http.StatusInternalServerError, err.Error())
resp.State.CurrentJob.Steps = append(resp.State.CurrentJob.Steps, &ViewJobStep{ return
Summary: v.Name,
Duration: v.Duration().String(),
Status: v.Status.String(),
})
} }
resp.State.CurrentJob.Steps = append(resp.State.CurrentJob.Steps, steps...)
resp.Logs.StepsLog = append(resp.Logs.StepsLog, logs...)
}
for _, cursor := range req.LogCursors { ctx.JSON(http.StatusOK, resp)
if !cursor.Expanded { }
continue
} func convertToViewModel(ctx *context_module.Context, cursors []LogCursor, task *actions_model.ActionTask) ([]*ViewJobStep, []*ViewStepLog, error) {
var viewJobs []*ViewJobStep
var logs []*ViewStepLog
steps := actions.FullSteps(task)
for _, v := range steps {
viewJobs = append(viewJobs, &ViewJobStep{
Summary: v.Name,
Duration: v.Duration().String(),
Status: v.Status.String(),
})
}
step := steps[cursor.Step] for _, cursor := range cursors {
if !cursor.Expanded {
// if task log is expired, return a consistent log line continue
if task.LogExpired { }
if cursor.Cursor == 0 {
resp.Logs.StepsLog = append(resp.Logs.StepsLog, &ViewStepLog{ step := steps[cursor.Step]
Step: cursor.Step,
Cursor: 1, // if task log is expired, return a consistent log line
Lines: []*ViewStepLogLine{ if task.LogExpired {
{ if cursor.Cursor == 0 {
Index: 1, logs = append(logs, &ViewStepLog{
Message: ctx.Locale.TrString("actions.runs.expire_log_message"), Step: cursor.Step,
// Timestamp doesn't mean anything when the log is expired. Cursor: 1,
// Set it to the task's updated time since it's probably the time when the log has expired. Lines: []*ViewStepLogLine{
Timestamp: float64(task.Updated.AsTime().UnixNano()) / float64(time.Second), {
}, Index: 1,
Message: ctx.Locale.TrString("actions.runs.expire_log_message"),
// Timestamp doesn't mean anything when the log is expired.
// Set it to the task's updated time since it's probably the time when the log has expired.
Timestamp: float64(task.Updated.AsTime().UnixNano()) / float64(time.Second),
}, },
Started: int64(step.Started), },
}) Started: int64(step.Started),
} })
continue
} }
continue
}
logLines := make([]*ViewStepLogLine, 0) // marshal to '[]' instead fo 'null' in json logLines := make([]*ViewStepLogLine, 0) // marshal to '[]' instead fo 'null' in json
index := step.LogIndex + cursor.Cursor index := step.LogIndex + cursor.Cursor
validCursor := cursor.Cursor >= 0 && validCursor := cursor.Cursor >= 0 &&
// !(cursor.Cursor < step.LogLength) when the frontend tries to fetch next line before it's ready. // !(cursor.Cursor < step.LogLength) when the frontend tries to fetch next line before it's ready.
// So return the same cursor and empty lines to let the frontend retry. // So return the same cursor and empty lines to let the frontend retry.
cursor.Cursor < step.LogLength && cursor.Cursor < step.LogLength &&
// !(index < task.LogIndexes[index]) when task data is older than step data. // !(index < task.LogIndexes[index]) when task data is older than step data.
// It can be fixed by making sure write/read tasks and steps in the same transaction, // It can be fixed by making sure write/read tasks and steps in the same transaction,
// but it's easier to just treat it as fetching the next line before it's ready. // but it's easier to just treat it as fetching the next line before it's ready.
index < int64(len(task.LogIndexes)) index < int64(len(task.LogIndexes))
if validCursor { if validCursor {
length := step.LogLength - cursor.Cursor length := step.LogLength - cursor.Cursor
offset := task.LogIndexes[index] offset := task.LogIndexes[index]
logRows, err := actions.ReadLogs(ctx, task.LogInStorage, task.LogFilename, offset, length) logRows, err := actions.ReadLogs(ctx, task.LogInStorage, task.LogFilename, offset, length)
if err != nil { if err != nil {
ctx.ServerError("actions.ReadLogs", err) return nil, nil, fmt.Errorf("actions.ReadLogs: %w", err)
return
}
for i, row := range logRows {
logLines = append(logLines, &ViewStepLogLine{
Index: cursor.Cursor + int64(i) + 1, // start at 1
Message: row.Content,
Timestamp: float64(row.Time.AsTime().UnixNano()) / float64(time.Second),
})
}
} }
resp.Logs.StepsLog = append(resp.Logs.StepsLog, &ViewStepLog{ for i, row := range logRows {
Step: cursor.Step, logLines = append(logLines, &ViewStepLogLine{
Cursor: cursor.Cursor + int64(len(logLines)), Index: cursor.Cursor + int64(i) + 1, // start at 1
Lines: logLines, Message: row.Content,
Started: int64(step.Started), Timestamp: float64(row.Time.AsTime().UnixNano()) / float64(time.Second),
}) })
}
} }
logs = append(logs, &ViewStepLog{
Step: cursor.Step,
Cursor: cursor.Cursor + int64(len(logLines)),
Lines: logLines,
Started: int64(step.Started),
})
} }
ctx.JSON(http.StatusOK, resp) return viewJobs, logs, nil
} }
// Rerun will rerun jobs in the given run // Rerun will rerun jobs in the given run

Loading…
Cancel
Save