Add bundle destroy command (#300)

Adds bundle destroy capability to bricks
shreyas-goenka 2023-04-06 12:54:58 +02:00 committed by GitHub
parent 6feaed4990
commit 4871f7bc8a
14 changed files with 377 additions and 14 deletions
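
In outline, the new `bricks bundle destroy` command tears down the resources and files deployed by a bundle. The sketch below condenses the flow wired up in the cmd/bundle and bundle/phases hunks further down; it is a paraphrase under stated assumptions, not the committed code, and the helper name `destroyBundle` is made up for illustration.

package example

import (
	"context"

	"github.com/databricks/bricks/bundle"
	"github.com/databricks/bricks/bundle/phases"
	"github.com/databricks/bricks/libs/cmdio"
	"github.com/databricks/bricks/libs/flags"
)

// destroyBundle paraphrases what the new command does: set up the cmdio logger,
// then apply the initialize, build, and destroy phases to the bundle.
func destroyBundle(ctx context.Context, b *bundle.Bundle, autoApprove bool) error {
	// Skip interactive confirmation prompts when --auto-approve was passed.
	b.AutoApprove = autoApprove

	// The mutators talk to the user through a cmdio.Logger stored in the context.
	ctx = cmdio.NewContext(ctx, cmdio.NewLogger(flags.ModeAppend))

	return bundle.Apply(ctx, b, []bundle.Mutator{
		phases.Initialize(),
		phases.Build(),
		phases.Destroy(),
	})
}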


@@ -16,6 +16,7 @@ import (
 	"github.com/databricks/bricks/folders"
 	"github.com/databricks/bricks/libs/git"
 	"github.com/databricks/bricks/libs/locker"
+	"github.com/databricks/bricks/libs/terraform"
 	"github.com/databricks/databricks-sdk-go"
 	sdkconfig "github.com/databricks/databricks-sdk-go/config"
 	"github.com/hashicorp/terraform-exec/tfexec"
@@ -34,6 +35,12 @@ type Bundle struct {
 	// Stores the locker responsible for acquiring/releasing a deployment lock.
 	Locker *locker.Locker

+	Plan *terraform.Plan
+
+	// if true, we skip approval checks for deploy, destroy resources and delete
+	// files
+	AutoApprove bool
 }

 func Load(path string) (*Bundle, error) {


@@ -0,0 +1,58 @@
package files

import (
	"context"
	"fmt"
	"os"

	"github.com/databricks/bricks/bundle"
	"github.com/databricks/bricks/libs/cmdio"
	"github.com/databricks/databricks-sdk-go/service/workspace"
	"github.com/fatih/color"
)

type delete struct{}

func (m *delete) Name() string {
	return "files.Delete"
}

func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
	// Do not delete files if terraform destroy was not consented
	if !b.Plan.IsEmpty && !b.Plan.ConfirmApply {
		return nil, nil
	}

	// interface to io with the user
	logger, ok := cmdio.FromContext(ctx)
	if !ok {
		return nil, fmt.Errorf("no logger found")
	}

	red := color.New(color.FgRed).SprintFunc()
	fmt.Fprintf(os.Stderr, "\nRemote directory %s will be deleted\n", b.Config.Workspace.Root)
	if !b.AutoApprove {
		proceed, err := logger.Ask(fmt.Sprintf("%s and all files in it will be %s Proceed?: ", b.Config.Workspace.Root, red("deleted permanently!")))
		if err != nil {
			return nil, err
		}
		if !proceed {
			return nil, nil
		}
	}

	err := b.WorkspaceClient().Workspace.Delete(ctx, workspace.Delete{
		Path:      b.Config.Workspace.Root,
		Recursive: true,
	})
	if err != nil {
		return nil, err
	}

	fmt.Println("Successfully deleted files!")
	return nil, nil
}

func Delete() bundle.Mutator {
	return &delete{}
}
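
For context, the mutator above reads b.Plan and b.AutoApprove before touching the workspace, so both must be populated before it runs; in the destroy phase this is done by terraform.Plan and the --auto-approve flag. A minimal sketch of applying it on its own, assuming an already-loaded and initialized bundle (the helper name and the hard-coded consent are made up for illustration):

package example

import (
	"context"

	"github.com/databricks/bricks/bundle"
	"github.com/databricks/bricks/bundle/deploy/files"
	"github.com/databricks/bricks/libs/cmdio"
	"github.com/databricks/bricks/libs/flags"
	"github.com/databricks/bricks/libs/terraform"
)

// deleteRemoteFiles is a hypothetical helper that applies files.Delete directly.
func deleteRemoteFiles(ctx context.Context, b *bundle.Bundle) error {
	b.AutoApprove = true                         // skip the interactive prompt
	b.Plan = &terraform.Plan{ConfirmApply: true} // pretend destroy was confirmed
	ctx = cmdio.NewContext(ctx, cmdio.NewLogger(flags.ModeAppend))
	return bundle.Apply(ctx, b, []bundle.Mutator{files.Delete()})
}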


@@ -0,0 +1,126 @@
package terraform

import (
	"context"
	"fmt"
	"os"
	"strings"

	"github.com/databricks/bricks/bundle"
	"github.com/databricks/bricks/libs/cmdio"
	"github.com/fatih/color"
	"github.com/hashicorp/terraform-exec/tfexec"
	tfjson "github.com/hashicorp/terraform-json"
)

// TODO: This is temporary. Come up with a robust way to log mutator progress and
// status events
type PlanResourceChange struct {
	ResourceType string `json:"resource_type"`
	Action       string `json:"action"`
	ResourceName string `json:"resource_name"`
}

func (c *PlanResourceChange) String() string {
	result := strings.Builder{}
	switch c.Action {
	case "delete":
		result.WriteString(" delete ")
	default:
		result.WriteString(c.Action + " ")
	}
	switch c.ResourceType {
	case "databricks_job":
		result.WriteString("job ")
	case "databricks_pipeline":
		result.WriteString("pipeline ")
	default:
		result.WriteString(c.ResourceType + " ")
	}
	result.WriteString(c.ResourceName)
	return result.String()
}

func logDestroyPlan(l *cmdio.Logger, changes []*tfjson.ResourceChange) error {
	// TODO: remove once we have mutator logging in place
	fmt.Fprintln(os.Stderr, "The following resources will be removed: ")
	for _, c := range changes {
		if c.Change.Actions.Delete() {
			l.Log(&PlanResourceChange{
				ResourceType: c.Type,
				Action:       "delete",
				ResourceName: c.Name,
			})
		}
	}
	return nil
}

type destroy struct{}

func (w *destroy) Name() string {
	return "terraform.Destroy"
}

func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
	// interface to io with the user
	logger, ok := cmdio.FromContext(ctx)
	if !ok {
		return nil, fmt.Errorf("no logger found")
	}

	if b.Plan.IsEmpty {
		fmt.Fprintln(os.Stderr, "No resources to destroy!")
		return nil, nil
	}

	tf := b.Terraform
	if tf == nil {
		return nil, fmt.Errorf("terraform not initialized")
	}

	// read plan file
	plan, err := tf.ShowPlanFile(ctx, b.Plan.Path)
	if err != nil {
		return nil, err
	}

	// print the resources that will be destroyed
	err = logDestroyPlan(logger, plan.ResourceChanges)
	if err != nil {
		return nil, err
	}

	// Ask for confirmation, if needed
	if !b.Plan.ConfirmApply {
		red := color.New(color.FgRed).SprintFunc()
		b.Plan.ConfirmApply, err = logger.Ask(fmt.Sprintf("\nThis will permanently %s resources! Proceed? [y/n]: ", red("destroy")))
		if err != nil {
			return nil, err
		}
	}

	// return if confirmation was not provided
	if !b.Plan.ConfirmApply {
		return nil, nil
	}

	if b.Plan.Path == "" {
		return nil, fmt.Errorf("no plan found")
	}

	// Apply terraform according to the computed destroy plan
	err = tf.Apply(ctx, tfexec.DirOrPlan(b.Plan.Path))
	if err != nil {
		return nil, fmt.Errorf("terraform destroy: %w", err)
	}

	fmt.Fprintln(os.Stderr, "Successfully destroyed resources!")
	return nil, nil
}

// Destroy returns a [bundle.Mutator] that runs the conceptual equivalent of
// `terraform destroy ./plan` from the bundle's ephemeral working directory
// for Terraform.
func Destroy() bundle.Mutator {
	return &destroy{}
}
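
For illustration, the destroy plan log lines produced by logDestroyPlan render through PlanResourceChange.String. A minimal runnable sketch (the resource name is made up):

package main

import (
	"fmt"

	"github.com/databricks/bricks/bundle/deploy/terraform"
)

func main() {
	// A hypothetical change entry, as logDestroyPlan would emit for a job slated
	// for deletion.
	c := &terraform.PlanResourceChange{
		ResourceType: "databricks_job",
		Action:       "delete",
		ResourceName: "my_job",
	}
	// Prints " delete job my_job" (the exact padding comes from String above).
	fmt.Println(c.String())
}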


@@ -0,0 +1,66 @@
package terraform

import (
	"context"
	"fmt"
	"path/filepath"

	"github.com/databricks/bricks/bundle"
	"github.com/databricks/bricks/libs/terraform"
	"github.com/hashicorp/terraform-exec/tfexec"
)

type PlanGoal string

var (
	PlanDeploy  = PlanGoal("deploy")
	PlanDestroy = PlanGoal("destroy")
)

type plan struct {
	goal PlanGoal
}

func (p *plan) Name() string {
	return "terraform.Plan"
}

func (p *plan) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
	tf := b.Terraform
	if tf == nil {
		return nil, fmt.Errorf("terraform not initialized")
	}

	err := tf.Init(ctx, tfexec.Upgrade(true))
	if err != nil {
		return nil, fmt.Errorf("terraform init: %w", err)
	}

	// Persist computed plan
	tfDir, err := Dir(b)
	if err != nil {
		return nil, err
	}
	planPath := filepath.Join(tfDir, "plan")
	destroy := p.goal == PlanDestroy

	notEmpty, err := tf.Plan(ctx, tfexec.Destroy(destroy), tfexec.Out(planPath))
	if err != nil {
		return nil, err
	}

	// Set plan in main bundle struct for downstream mutators
	b.Plan = &terraform.Plan{
		Path:         planPath,
		ConfirmApply: b.AutoApprove,
		IsEmpty:      !notEmpty,
	}

	return nil, nil
}

// Plan returns a [bundle.Mutator] that runs the equivalent of `terraform plan -out ./plan`
// from the bundle's ephemeral working directory for Terraform.
func Plan(goal PlanGoal) bundle.Mutator {
	return &plan{
		goal: goal,
	}
}

bundle/phases/destroy.go (new file)

@@ -0,0 +1,24 @@
package phases

import (
	"github.com/databricks/bricks/bundle"
	"github.com/databricks/bricks/bundle/deploy/files"
	"github.com/databricks/bricks/bundle/deploy/lock"
	"github.com/databricks/bricks/bundle/deploy/terraform"
)

// The destroy phase deletes artifacts and resources.
func Destroy() bundle.Mutator {
	return newPhase(
		"destroy",
		[]bundle.Mutator{
			lock.Acquire(),
			terraform.StatePull(),
			terraform.Plan(terraform.PlanGoal("destroy")),
			terraform.Destroy(),
			terraform.StatePush(),
			lock.Release(),
			files.Delete(),
		},
	)
}


@@ -8,8 +8,8 @@ import (
 	"github.com/databricks/bricks/bundle"
 	"github.com/databricks/bricks/bundle/config/resources"
+	"github.com/databricks/bricks/libs/cmdio"
 	"github.com/databricks/bricks/libs/log"
-	"github.com/databricks/bricks/libs/progress"
 	"github.com/databricks/databricks-sdk-go/retries"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/fatih/color"
@@ -177,7 +177,7 @@ func logDebugCallback(ctx context.Context, runId *int64) func(info *retries.Info[jobs.Run]) {
 	}
 }

-func logProgressCallback(ctx context.Context, progressLogger *progress.Logger) func(info *retries.Info[jobs.Run]) {
+func logProgressCallback(ctx context.Context, progressLogger *cmdio.Logger) func(info *retries.Info[jobs.Run]) {
 	var prevState *jobs.RunState
 	return func(info *retries.Info[jobs.Run]) {
 		i := info.Info
@@ -241,7 +241,7 @@ func (r *jobRunner) Run(ctx context.Context, opts *Options) (RunOutput, error) {
 	logDebug := logDebugCallback(ctx, runId)

 	// callback to log progress events. Called on every poll request
-	progressLogger, ok := progress.FromContext(ctx)
+	progressLogger, ok := cmdio.FromContext(ctx)
 	if !ok {
 		return nil, fmt.Errorf("no progress logger found")
 	}


@@ -9,9 +9,9 @@ import (
 	"github.com/databricks/bricks/bundle"
 	"github.com/databricks/bricks/bundle/config/resources"
 	"github.com/databricks/bricks/bundle/run/pipeline"
+	"github.com/databricks/bricks/libs/cmdio"
 	"github.com/databricks/bricks/libs/flags"
 	"github.com/databricks/bricks/libs/log"
-	"github.com/databricks/bricks/libs/progress"
 	"github.com/databricks/databricks-sdk-go/service/pipelines"
 	flag "github.com/spf13/pflag"
 )
@@ -162,7 +162,7 @@ func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (RunOutput, err
 	// setup progress logger and tracker to query events
 	updateTracker := pipeline.NewUpdateTracker(pipelineID, updateID, w)
-	progressLogger, ok := progress.FromContext(ctx)
+	progressLogger, ok := cmdio.FromContext(ctx)
 	if !ok {
 		return nil, fmt.Errorf("no progress logger found")
 	}

cmd/bundle/destroy.go (new file)

@@ -0,0 +1,50 @@
package bundle

import (
	"fmt"
	"os"

	"github.com/databricks/bricks/bundle"
	"github.com/databricks/bricks/bundle/phases"
	"github.com/databricks/bricks/cmd/root"
	"github.com/databricks/bricks/libs/cmdio"
	"github.com/databricks/bricks/libs/flags"
	"github.com/spf13/cobra"
	"golang.org/x/term"
)

var destroyCmd = &cobra.Command{
	Use:   "destroy",
	Short: "Destroy deployed bundle resources",

	PreRunE: root.MustConfigureBundle,
	RunE: func(cmd *cobra.Command, args []string) error {
		b := bundle.Get(cmd.Context())

		// If `--force` is specified, force acquisition of the deployment lock.
		b.Config.Bundle.Lock.Force = force

		// If `--auto-approve` is specified, we skip confirmation checks.
		b.AutoApprove = autoApprove

		// We require --auto-approve for non-TTY terminals since interactive
		// consent is not possible.
		if !term.IsTerminal(int(os.Stderr.Fd())) && !autoApprove {
			return fmt.Errorf("please specify --auto-approve to skip interactive confirmation checks for non tty consoles")
		}

		ctx := cmdio.NewContext(cmd.Context(), cmdio.NewLogger(flags.ModeAppend))
		return bundle.Apply(ctx, b, []bundle.Mutator{
			phases.Initialize(),
			phases.Build(),
			phases.Destroy(),
		})
	},
}

var autoApprove bool

func init() {
	AddCommand(destroyCmd)
	destroyCmd.Flags().BoolVar(&autoApprove, "auto-approve", false, "Skip interactive approvals for deleting resources and files")
}


@@ -5,8 +5,8 @@ import (
 	"fmt"
 	"os"

+	"github.com/databricks/bricks/libs/cmdio"
 	"github.com/databricks/bricks/libs/flags"
-	"github.com/databricks/bricks/libs/progress"
 	"golang.org/x/term"
 )
@@ -31,8 +31,8 @@ func initializeProgressLogger(ctx context.Context) (context.Context, error) {
 		format = resolveModeDefault(format)
 	}

-	progressLogger := progress.NewLogger(format)
-	return progress.NewContext(ctx, progressLogger), nil
+	progressLogger := cmdio.NewLogger(format)
+	return cmdio.NewContext(ctx, progressLogger), nil
 }

 var progressFormat = flags.NewProgressLogFormat()


@@ -4,8 +4,8 @@ import (
 	"context"
 	"testing"

+	"github.com/databricks/bricks/libs/cmdio"
 	"github.com/databricks/bricks/libs/flags"
-	"github.com/databricks/bricks/libs/progress"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -39,7 +39,7 @@ func TestDefaultLoggerModeResolution(t *testing.T) {
 	require.Equal(t, progressFormat, flags.ModeDefault)
 	ctx, err := initializeProgressLogger(context.Background())
 	require.NoError(t, err)
-	logger, ok := progress.FromContext(ctx)
+	logger, ok := cmdio.FromContext(ctx)
 	assert.True(t, ok)
 	assert.Equal(t, logger.Mode, flags.ModeAppend)
 }


@@ -1,4 +1,4 @@
-package progress
+package cmdio

 import (
 	"context"


@@ -1,4 +1,4 @@
-package progress
+package cmdio

 type Event interface {
 	String() string


@@ -1,6 +1,7 @@
-package progress
+package cmdio

 import (
+	"bufio"
 	"encoding/json"
 	"io"
 	"os"
@@ -9,7 +10,9 @@ import (
 )

 type Logger struct {
 	Mode flags.ProgressLogFormat
+
+	Reader bufio.Reader
 	Writer io.Writer

 	isFirstEvent bool
@@ -19,10 +22,26 @@ func NewLogger(mode flags.ProgressLogFormat) *Logger {
 	return &Logger{
 		Mode:         mode,
 		Writer:       os.Stderr,
+		Reader:       *bufio.NewReader(os.Stdin),
 		isFirstEvent: true,
 	}
 }

+func (l *Logger) Ask(question string) (bool, error) {
+	l.Writer.Write([]byte(question))
+	ans, err := l.Reader.ReadString('\n')
+	if err != nil {
+		return false, err
+	}
+	if ans == "y\n" {
+		return true, nil
+	} else {
+		return false, nil
+	}
+}
+
 func (l *Logger) Log(event Event) {
 	switch l.Mode {
 	case flags.ModeInplace:
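
The new Ask helper above treats only a bare "y" answer as consent; anything else (including "yes") reads as a decline. A minimal sketch of exercising it outside a terminal by swapping the Reader for canned input (the canned answer is of course made up):

package main

import (
	"bufio"
	"fmt"
	"strings"

	"github.com/databricks/bricks/libs/cmdio"
	"github.com/databricks/bricks/libs/flags"
)

func main() {
	l := cmdio.NewLogger(flags.ModeAppend)
	// Feed a canned "y" answer instead of reading from os.Stdin.
	l.Reader = *bufio.NewReader(strings.NewReader("y\n"))

	ok, err := l.Ask("Proceed? [y/n]: ")
	fmt.Println(ok, err) // true <nil>
}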

libs/terraform/plan.go (new file)

@@ -0,0 +1,13 @@
package terraform

type Plan struct {
	// Path to the plan
	Path string

	// Holds whether the user has consented to destruction, either by interactive
	// confirmation or by passing a command line flag.
	ConfirmApply bool

	// If true, the plan is empty and applying it will not do anything
	IsEmpty bool
}
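
Taken together: terraform.Plan fills this struct (ConfirmApply starts out as the value of --auto-approve), terraform.Destroy may flip ConfirmApply after its interactive prompt, and files.Delete removes the remote directory only when the plan was empty or consent was given (a further prompt still guards the deletion unless --auto-approve was passed). A condensed restatement of that last gate, with a made-up helper name:

package example

import "github.com/databricks/bricks/libs/terraform"

// shouldDeleteFiles restates the check at the top of files.Delete: skip deletion
// only when there was a non-empty destroy plan that the user did not confirm.
func shouldDeleteFiles(p *terraform.Plan) bool {
	return p.IsEmpty || p.ConfirmApply
}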