mirror of https://github.com/databricks/cli.git

add prompt and crud test for volumes

This commit is contained in:
  parent 88d0402f44
  commit 6f9817e194
@@ -33,7 +33,7 @@ func (volumeConverter) Convert(ctx context.Context, key string, vin dyn.Value, o
     }
 
     // Add the converted resource to the output.
-    out.Schema[key] = vout.AsAny()
+    out.Volume[key] = vout.AsAny()
 
     // Configure grants for this resource.
     if grants := convertGrantsResource(ctx, vin); grants != nil {
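The one-line change above fixes which map the converted volume lands in: the volume converter previously wrote its output into out.Schema, so a volume declared in a bundle would have surfaced in the generated Terraform configuration under the schema resources rather than the volumes.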
@@ -22,10 +22,12 @@ import (
     tfjson "github.com/hashicorp/terraform-json"
 )
 
-func parseTerraformActions(changes []*tfjson.ResourceChange, toInclude func(typ string, actions tfjson.Actions) bool) []terraformlib.Action {
+func filterDeleteOrRecreateActions(changes []*tfjson.ResourceChange, resourceType string) []terraformlib.Action {
     res := make([]terraformlib.Action, 0)
     for _, rc := range changes {
-        if !toInclude(rc.Type, rc.Change.Actions) {
+        // TODO: Add end to end integration tests for the interactive prompt UXs.
+        // Good PR to introduce the first one, and make changes more confidently.
+        if rc.Type != resourceType {
             continue
         }
 
@@ -36,7 +38,7 @@ func parseTerraformActions(changes []*tfjson.ResourceChange, toInclude func(typ
         case rc.Change.Actions.Replace():
             actionType = terraformlib.ActionTypeRecreate
         default:
-            // No use case for other action types yet.
+            // Filter other action types..
             continue
         }
 
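Putting the two hunks above together, the refactored helper reads roughly as below. This is a self-contained sketch: the ActionType constants, the Action struct, and the change struct are stand-ins for the CLI's terraformlib and the tfjson plan types, whose exact shapes are not shown in this diff.

package main

import "fmt"

// Stand-ins for terraformlib.ActionType/Action and the tfjson resource
// change (assumptions; the real types live in libs/terraform and
// github.com/hashicorp/terraform-json).
type ActionType string

const (
    ActionTypeDelete   ActionType = "delete"
    ActionTypeRecreate ActionType = "recreate"
)

type Action struct {
    Action       ActionType
    ResourceType string
    ResourceName string
}

func (a Action) String() string {
    return fmt.Sprintf("%s %s %s", a.Action, a.ResourceType, a.ResourceName)
}

type change struct {
    Type            string
    Name            string
    Delete, Replace bool
}

// filterDeleteOrRecreateActions, roughly as it reads after this commit:
// the caller-supplied predicate is replaced by a resource-type match, and
// only delete/replace actions survive the switch.
func filterDeleteOrRecreateActions(changes []change, resourceType string) []Action {
    res := make([]Action, 0)
    for _, rc := range changes {
        if rc.Type != resourceType {
            continue
        }
        var actionType ActionType
        switch {
        case rc.Delete:
            actionType = ActionTypeDelete
        case rc.Replace:
            actionType = ActionTypeRecreate
        default:
            continue // filter other action types
        }
        res = append(res, Action{Action: actionType, ResourceType: rc.Type, ResourceName: rc.Name})
    }
    return res
}

func main() {
    changes := []change{
        {Type: "databricks_volume", Name: "foo", Replace: true},
        {Type: "databricks_pipeline", Name: "bar", Delete: true},
        {Type: "databricks_job", Name: "baz", Delete: true},
    }
    fmt.Println(filterDeleteOrRecreateActions(changes, "databricks_volume")) // [recreate databricks_volume foo]
}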
@@ -62,30 +64,12 @@ func approvalForDeploy(ctx context.Context, b *bundle.Bundle) (bool, error) {
         return false, err
     }
 
-    schemaActions := parseTerraformActions(plan.ResourceChanges, func(typ string, actions tfjson.Actions) bool {
-        // Filter in only UC schema resources.
-        if typ != "databricks_schema" {
-            return false
-        }
-
-        // We only display prompts for destructive actions like deleting or
-        // recreating a schema.
-        return actions.Delete() || actions.Replace()
-    })
-
-    dltActions := parseTerraformActions(plan.ResourceChanges, func(typ string, actions tfjson.Actions) bool {
-        // Filter in only DLT pipeline resources.
-        if typ != "databricks_pipeline" {
-            return false
-        }
-
-        // Recreating DLT pipeline leads to metadata loss and for a transient period
-        // the underling tables will be unavailable.
-        return actions.Replace() || actions.Delete()
-    })
+    schemaActions := filterDeleteOrRecreateActions(plan.ResourceChanges, "databricks_schema")
+    dltActions := filterDeleteOrRecreateActions(plan.ResourceChanges, "databricks_pipeline")
+    volumeActions := filterDeleteOrRecreateActions(plan.ResourceChanges, "databricks_volume")
 
     // We don't need to display any prompts in this case.
-    if len(dltActions) == 0 && len(schemaActions) == 0 {
+    if len(schemaActions) == 0 && len(dltActions) == 0 && len(volumeActions) == 0 {
         return true, nil
     }
 
@@ -110,6 +94,18 @@ properties such as the 'catalog' or 'storage' are changed:`
         }
     }
 
+    // One or more volumes is being recreated.
+    if len(volumeActions) != 0 {
+        msg := `
+This action will result in the deletion or recreation of the following Volumes. For managed volumes,
+this typically results in a deletion of the upstream data in the cloud tenant in ~30 days. For external
+volumes the upstream data in the cloud tenant is not affected:`
+        cmdio.LogString(ctx, msg)
+        for _, action := range volumeActions {
+            cmdio.Log(ctx, action)
+        }
+    }
+
     if b.AutoApprove {
         return true, nil
     }
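A condensed, self-contained sketch of how approvalForDeploy behaves after these two hunks. Plain strings stand in for terraformlib actions, fmt stands in for the cmdio logger, and the yes/no prompt wording is an assumption; only the volume warning text is taken verbatim from the diff above.

package main

import (
    "bufio"
    "fmt"
    "os"
    "strings"
)

func approvalForDeploy(schemaActions, dltActions, volumeActions []string, autoApprove bool) (bool, error) {
    // No destructive changes anywhere: nothing to confirm.
    if len(schemaActions) == 0 && len(dltActions) == 0 && len(volumeActions) == 0 {
        return true, nil
    }

    // (The schema and DLT warnings, elided here, follow the same pattern.)
    if len(volumeActions) != 0 {
        fmt.Println(`
This action will result in the deletion or recreation of the following Volumes. For managed volumes,
this typically results in a deletion of the upstream data in the cloud tenant in ~30 days. For external
volumes the upstream data in the cloud tenant is not affected:`)
        for _, action := range volumeActions {
            fmt.Println(action)
        }
    }

    // --auto-approve short-circuits the interactive prompt.
    if autoApprove {
        return true, nil
    }

    // Prompt wording is an assumption, not taken from the diff.
    fmt.Print("Would you like to proceed? [y/n]: ")
    line, err := bufio.NewReader(os.Stdin).ReadString('\n')
    if err != nil {
        return false, err
    }
    return strings.TrimSpace(line) == "y", nil
}

func main() {
    ok, err := approvalForDeploy(nil, nil, []string{"  recreate volume foo"}, true)
    fmt.Println(ok, err) // true <nil>
}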
@@ -40,7 +40,7 @@ func TestParseTerraformActions(t *testing.T) {
         },
     }
 
-    res := parseTerraformActions(changes, func(typ string, actions tfjson.Actions) bool {
+    res := filterDeleteOrRecreateActions(changes, func(typ string, actions tfjson.Actions) bool {
         if typ != "databricks_pipeline" {
             return false
         }
@@ -0,0 +1,8 @@
+{
+    "properties": {
+        "unique_id": {
+            "type": "string",
+            "description": "Unique ID for the schema and pipeline names"
+        }
+    }
+}
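This databricks_template_schema.json defines the only input the fixture takes: the unique_id that the integration test generates and that the {{.unique_id}} placeholders in the bundle configuration below consume.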
@@ -0,0 +1,26 @@
+bundle:
+  name: test-uc-volumes-{{.unique_id}}
+
+variables:
+  schema_name:
+    default: ${resources.schemas.schema1.name}
+
+resources:
+  schemas:
+    schema1:
+      name: schema1-{{.unique_id}}
+      catalog_name: main
+      comment: This schema was created from DABs
+
+    schema2:
+      name: schema2-{{.unique_id}}
+      catalog_name: main
+      comment: This schema was created from DABs
+
+  volumes:
+    foo:
+      catalog_name: main
+      name: my_volume
+      schema_name: ${var.schema_name}
+      volume_type: MANAGED
+      comment: This volume was created from DABs.
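Note the indirection through the schema_name variable, which defaults to schema1's name. Overriding it at deploy time (the test below passes --var=schema_name=${resources.schemas.schema2.name}) repoints the volume at schema2; the volume cannot be moved between schemas in place, so Terraform plans a recreate, which is exactly what triggers the new prompt.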
@@ -0,0 +1,2 @@
+-- Databricks notebook source
+select 1
@@ -243,3 +243,56 @@ func TestAccDeployBasicBundleLogs(t *testing.T) {
     }, "\n"), stderr)
     assert.Equal(t, "", stdout)
 }
+
+func TestAccDeployUcVolume(t *testing.T) {
+    ctx, wt := acc.UcWorkspaceTest(t)
+    w := wt.W
+
+    uniqueId := uuid.New().String()
+    bundleRoot, err := initTestTemplate(t, ctx, "uc_volume", map[string]any{
+        "unique_id": uniqueId,
+    })
+    require.NoError(t, err)
+
+    err = deployBundle(t, ctx, bundleRoot)
+    require.NoError(t, err)
+
+    t.Cleanup(func() {
+        destroyBundle(t, ctx, bundleRoot)
+    })
+
+    // Assert the volume is created successfully
+    catalogName := "main"
+    schemaName := "schema1-" + uniqueId
+    volumeName := "my_volume"
+    volume, err := w.Volumes.ReadByName(ctx, fmt.Sprintf("%s.%s.%s", catalogName, schemaName, volumeName))
+    require.NoError(t, err)
+    require.Equal(t, volume.Name, volumeName)
+    require.Equal(t, catalogName, volume.CatalogName)
+    require.Equal(t, schemaName, volume.SchemaName)
+
+    // Recreation of the volume without --auto-approve should fail since prompting is not possible
+    t.Setenv("TERM", "dumb")
+    t.Setenv("BUNDLE_ROOT", bundleRoot)
+    stdout, stderr, err := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--var=schema_name=${resources.schemas.schema2.name}").Run()
+    assert.Error(t, err)
+    assert.Contains(t, stderr.String(), `This action will result in the deletion or recreation of the following Volumes. For managed volumes,
+this typically results in a deletion of the upstream data in the cloud tenant in ~30 days. For external
+volumes the upstream data in the cloud tenant is not affected:
+recreate volume foo`)
+    assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed")
+
+    // Recreation of the volume with --auto-approve should succeed
+    t.Setenv("TERM", "dumb")
+    t.Setenv("BUNDLE_ROOT", bundleRoot)
+    _, _, err = internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--var=schema_name=${resources.schemas.schema2.name}", "--auto-approve").Run()
+    assert.NoError(t, err)
+
+    // Assert the volume is updated successfully
+    schemaName = "schema2-" + uniqueId
+    volume, err = w.Volumes.ReadByName(ctx, fmt.Sprintf("%s.%s.%s", catalogName, schemaName, volumeName))
+    require.NoError(t, err)
+    require.Equal(t, volume.Name, volumeName)
+    require.Equal(t, catalogName, volume.CatalogName)
+    require.Equal(t, schemaName, volume.SchemaName)
+}
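Two details make this test work non-interactively: TERM=dumb makes the console non-promptable, so the first deploy exercises the failure path asserted on stdout instead of hanging on a prompt, while the second deploy takes the --auto-approve bypass. The final ReadByName assertions confirm the recreated volume landed in schema2.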