package run

import (
	"context"
	"encoding/json"
	"fmt"
	"strconv"
	"time"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config/resources"
	"github.com/databricks/cli/bundle/run/output"
	"github.com/databricks/cli/bundle/run/progress"
	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/cli/libs/log"
	"github.com/databricks/databricks-sdk-go/service/jobs"
	"github.com/fatih/color"
	"github.com/spf13/cobra"
	"golang.org/x/sync/errgroup"
)

// Default timeout for waiting for a job run to complete.
var jobRunTimeout time.Duration = 24 * time.Hour
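
// jobRunner wraps a job resource defined in a bundle and implements
// starting, waiting on, canceling, and restarting runs of that job.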
type jobRunner struct {
	key

	bundle *bundle.Bundle
	job    *resources.Job
}
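
// Name returns the job name from its settings, or an empty string if the
// job or its settings are missing.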
func (r *jobRunner) Name() string {
	if r.job == nil || r.job.JobSettings == nil {
		return ""
	}
	return r.job.JobSettings.Name
}
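
// isFailed reports whether a task run ended in an internal error or
// terminated with a failed result state.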
func isFailed(task jobs.RunTask) bool {
	return task.State.LifeCycleState == jobs.RunLifeCycleStateInternalError ||
		(task.State.LifeCycleState == jobs.RunLifeCycleStateTerminated &&
			task.State.ResultState == jobs.RunResultStateFailed)
}
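
// isSuccess reports whether a task run terminated with a successful result state.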
func isSuccess(task jobs.RunTask) bool {
	return task.State.LifeCycleState == jobs.RunLifeCycleStateTerminated &&
		task.State.ResultState == jobs.RunResultStateSuccess
}
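
// logFailedTasks fetches the run identified by runId and logs the terminal
// state of each of its tasks, including the error message and stack trace
// for tasks that failed.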
func (r *jobRunner) logFailedTasks(ctx context.Context, runId int64) {
	w := r.bundle.WorkspaceClient()
	red := color.New(color.FgRed).SprintFunc()
	green := color.New(color.FgGreen).SprintFunc()
	yellow := color.New(color.FgYellow).SprintFunc()
	run, err := w.Jobs.GetRun(ctx, jobs.GetRunRequest{
		RunId: runId,
	})
	if err != nil {
		log.Errorf(ctx, "failed to fetch job run: %s", err)
		return
	}
	if run.State.ResultState == jobs.RunResultStateSuccess {
		return
	}
	for _, task := range run.Tasks {
		if isSuccess(task) {
			log.Infof(ctx, "task %s completed successfully", green(task.TaskKey))
		} else if isFailed(task) {
			taskInfo, err := w.Jobs.GetRunOutput(ctx, jobs.GetRunOutputRequest{
				RunId: task.RunId,
			})
			if err != nil {
				log.Errorf(ctx, "task %s failed. Unable to fetch error trace: %s", red(task.TaskKey), err)
				continue
			}
			if progressLogger, ok := cmdio.FromContext(ctx); ok {
				progressLogger.Log(progress.NewTaskErrorEvent(task.TaskKey, taskInfo.Error, taskInfo.ErrorTrace))
			}
			log.Errorf(ctx, "task %s failed!\nError:\n%s\nTrace:\n%s",
				red(task.TaskKey), taskInfo.Error, taskInfo.ErrorTrace)
		} else {
			log.Infof(ctx, "task %s is in state %s",
				yellow(task.TaskKey), task.State.LifeCycleState)
		}
	}
}
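
// pullRunIdCallback returns a callback that records the run ID into *runId
// the first time it is observed.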
func pullRunIdCallback(runId *int64) func(info *jobs.Run) {
	return func(i *jobs.Run) {
		if *runId == 0 {
			*runId = i.RunId
		}
	}
}
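
// logDebugCallback returns a callback that logs the run page URL once it is
// available and logs every life cycle state transition thereafter.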
func logDebugCallback(ctx context.Context, runId *int64) func(info *jobs.Run) {
	var prevState *jobs.RunState
	return func(i *jobs.Run) {
		state := i.State
		if state == nil {
			return
		}

		// Log the job run URL as soon as it is available.
		if prevState == nil {
			log.Infof(ctx, "Run available at %s", i.RunPageUrl)
		}
		if prevState == nil || prevState.LifeCycleState != state.LifeCycleState {
			log.Infof(ctx, "Run status: %s", state.LifeCycleState)
			prevState = state
		}
	}
}
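
// logProgressCallback returns a callback that emits a progress event to the
// given cmdio logger (and the default logger) whenever the run's life cycle
// or result state changes.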
func logProgressCallback(ctx context.Context, progressLogger *cmdio.Logger) func(info *jobs.Run) {
	var prevState *jobs.RunState
	return func(i *jobs.Run) {
		state := i.State
		if state == nil {
			return
		}

		// Log the run URL as soon as it is available.
		if prevState == nil {
			progressLogger.Log(progress.NewJobRunUrlEvent(i.RunPageUrl))
		}

		// Only emit an event if the state changed since the previous poll.
		if prevState != nil && prevState.LifeCycleState == state.LifeCycleState &&
			prevState.ResultState == state.ResultState {
			return
		}
		prevState = state

		event := &progress.JobProgressEvent{
			Timestamp: time.Now(),
			JobId:     i.JobId,
			RunId:     i.RunId,
			RunName:   i.RunName,
			State:     *i.State,
		}

		// Log progress events to stderr.
		progressLogger.Log(event)

		// Log progress events using the default logger.
		log.Infof(ctx, "%s", event.String())
	}
}
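
// Run starts a new run of the job and waits for it to complete. It logs
// progress while polling and returns the run output on success, or an error
// describing the terminal state otherwise. If opts.NoWait is set, it returns
// immediately after the run has been started.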
func (r *jobRunner) Run(ctx context.Context, opts *Options) (output.RunOutput, error) {
	jobID, err := strconv.ParseInt(r.job.ID, 10, 64)
	if err != nil {
		return nil, fmt.Errorf("job ID is not an integer: %s", r.job.ID)
	}

	runId := new(int64)

	err = r.convertPythonParams(opts)
	if err != nil {
		return nil, err
	}

	// Construct the request payload from command-line flags and arguments.
	req, err := opts.Job.toPayload(r.job, jobID)
	if err != nil {
		return nil, err
	}

	// Include resource key in logger.
	ctx = log.NewContext(ctx, log.GetLogger(ctx).With("resource", r.Key()))

	w := r.bundle.WorkspaceClient()

	// Callback to record the run ID as soon as polling reports it.
	pullRunId := pullRunIdCallback(runId)

	// Callback to log status updates to the universal log destination.
	// Called on every poll request.
	logDebug := logDebugCallback(ctx, runId)

	// Callback to log progress events. Called on every poll request.
	progressLogger, ok := cmdio.FromContext(ctx)
	if !ok {
		return nil, fmt.Errorf("no progress logger found")
	}
	logProgress := logProgressCallback(ctx, progressLogger)

	waiter, err := w.Jobs.RunNow(ctx, *req)
	if err != nil {
		return nil, fmt.Errorf("cannot start job: %w", err)
	}

	if opts.NoWait {
		details, err := w.Jobs.GetRun(ctx, jobs.GetRunRequest{
			RunId: waiter.RunId,
		})
		if err != nil {
			return nil, err
		}
		progressLogger.Log(progress.NewJobRunUrlEvent(details.RunPageUrl))
		return nil, nil
	}

	run, err := waiter.OnProgress(func(r *jobs.Run) {
		pullRunId(r)
		logDebug(r)
		logProgress(r)
	}).GetWithTimeout(jobRunTimeout)
	if err != nil && *runId != 0 {
		r.logFailedTasks(ctx, *runId)
	}
	if err != nil {
		return nil, err
	}
	if run.State.LifeCycleState == jobs.RunLifeCycleStateSkipped {
		log.Infof(ctx, "Run was skipped!")
		return nil, fmt.Errorf("run skipped: %s", run.State.StateMessage)
	}

	switch run.State.ResultState {
	// The run was canceled at user request.
	case jobs.RunResultStateCanceled:
		log.Infof(ctx, "Run was canceled!")
		return nil, fmt.Errorf("run canceled: %s", run.State.StateMessage)

	// The run completed with an error.
	case jobs.RunResultStateFailed:
		log.Infof(ctx, "Run has failed!")
		return nil, fmt.Errorf("run failed: %s", run.State.StateMessage)

	// The run completed successfully.
	case jobs.RunResultStateSuccess:
		log.Infof(ctx, "Run has completed successfully!")
		return output.GetJobOutput(ctx, r.bundle.WorkspaceClient(), *runId)

	// The run was stopped after reaching the timeout.
	case jobs.RunResultStateTimedout:
		log.Infof(ctx, "Run has timed out!")
		return nil, fmt.Errorf("run timed out: %s", run.State.StateMessage)
	}

	return nil, err
}
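
// convertPythonParams supports the experimental Python wheel wrapper: when a
// job contains a Python wheel task, positional Python parameters cannot be
// passed through directly, so they are JSON-encoded into the reserved
// "__python_params" notebook parameter for the wrapper to decode.
//
// For example, python params ["--arg1", "value1"] are passed as
// notebookParams["__python_params"] = `["--arg1","value1"]`.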
func (r *jobRunner) convertPythonParams(opts *Options) error {
	if r.bundle.Config.Experimental != nil && !r.bundle.Config.Experimental.PythonWheelWrapper {
		return nil
	}

	needConvert := false
	for _, task := range r.job.Tasks {
		if task.PythonWheelTask != nil {
			needConvert = true
			break
		}
	}

	if !needConvert {
		return nil
	}

	if len(opts.Job.pythonParams) == 0 {
		return nil
	}

	if opts.Job.notebookParams == nil {
		opts.Job.notebookParams = make(map[string]string)
	}

	if _, ok := opts.Job.notebookParams["__python_params"]; ok {
		return fmt.Errorf("can't use __python_params as notebook param, the name is reserved for internal use")
	}
	p, err := json.Marshal(opts.Job.pythonParams)
	if err != nil {
		return err
	}
	opts.Job.notebookParams["__python_params"] = string(p)

	return nil
}
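
// Cancel cancels all active runs of the job in parallel and waits for each
// cancellation to reach a terminal state.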
func (r *jobRunner) Cancel(ctx context.Context) error {
	w := r.bundle.WorkspaceClient()
	jobID, err := strconv.ParseInt(r.job.ID, 10, 64)
	if err != nil {
		return fmt.Errorf("job ID is not an integer: %s", r.job.ID)
	}

	runs, err := w.Jobs.ListRunsAll(ctx, jobs.ListRunsRequest{
		ActiveOnly: true,
		JobId:      jobID,
	})
	if err != nil {
		return err
	}

	if len(runs) == 0 {
		return nil
	}

	errGroup, errCtx := errgroup.WithContext(ctx)
	for _, run := range runs {
		runId := run.RunId
		errGroup.Go(func() error {
			wait, err := w.Jobs.CancelRun(errCtx, jobs.CancelRun{
				RunId: runId,
			})
			if err != nil {
				return err
			}
			// Wait for the run to reach the Terminated or Skipped state.
			_, err = wait.GetWithTimeout(jobRunTimeout)
			return err
		})
	}

	return errGroup.Wait()
}
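
// Restart cancels any active runs of the job and then starts a new run. For
// continuous, unpaused jobs the cancellation step is skipped because the
// /jobs/run-now API already cancels the existing run.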
func (r *jobRunner) Restart(ctx context.Context, opts *Options) (output.RunOutput, error) {
	// We don't need to cancel existing runs if the job is continuous and unpaused.
	// The /jobs/run-now API will automatically cancel any existing runs before starting a new one.
	//
	// /jobs/run-now will not cancel existing runs if the job is continuous and paused.
	// New job runs will be queued instead and will wait for existing runs to finish.
	// In this case, we need to cancel the existing runs before starting a new one.
	continuous := r.job.JobSettings.Continuous
	if continuous != nil && continuous.PauseStatus == jobs.PauseStatusUnpaused {
		return r.Run(ctx, opts)
	}

	s := cmdio.Spinner(ctx)
	s <- "Cancelling all active job runs"
	err := r.Cancel(ctx)
	close(s)
	if err != nil {
		return nil, err
	}

	return r.Run(ctx, opts)
}
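
// ParseArgs delegates parsing of positional command-line arguments to the
// job's positional argument handler.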
func (r *jobRunner) ParseArgs(args []string, opts *Options) error {
	return r.posArgsHandler().ParseArgs(args, opts)
}
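
// CompleteArgs delegates shell completion of positional arguments to the
// job's positional argument handler.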
func (r *jobRunner) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	return r.posArgsHandler().CompleteArgs(args, toComplete)
}