mirror of https://github.com/databricks/cli.git
Don't merge-in remote resources during deployments (#1432)
## Changes

`check_running_resources` now pulls the remote state without modifying the bundle state, as it did before. This avoids a failure where we tried to compute deployment metadata for a deleted job (which we shouldn't be doing in the first place).

`deploy_then_remove_resources_test` now also deploys and deletes a job (in addition to a pipeline), which catches the error that this PR fixes.

## Tests

Unit and integration tests.
This commit is contained in:
parent 0a21428a48
commit 2035516fde
check_running_resources.go (moved from package deploy to package terraform):

```diff
@@ -1,4 +1,4 @@
-package deploy
+package terraform
 
 import (
 	"context"
@@ -6,11 +6,11 @@ import (
 	"strconv"
 
 	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/databricks-sdk-go"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/databricks/databricks-sdk-go/service/pipelines"
+	tfjson "github.com/hashicorp/terraform-json"
 	"golang.org/x/sync/errgroup"
 )
@@ -34,8 +34,14 @@ func (l *checkRunningResources) Apply(ctx context.Context, b *bundle.Bundle) dia
 	if !b.Config.Bundle.Deployment.FailOnActiveRuns {
 		return nil
 	}
+
+	state, err := ParseResourcesState(ctx, b)
+	if err != nil && state == nil {
+		return diag.FromErr(err)
+	}
+
 	w := b.WorkspaceClient()
-	err := checkAnyResourceRunning(ctx, w, &b.Config.Resources)
+	err = checkAnyResourceRunning(ctx, w, state)
 	if err != nil {
 		return diag.FromErr(err)
 	}
@@ -46,43 +52,50 @@ func CheckRunningResource() *checkRunningResources {
 	return &checkRunningResources{}
 }
 
-func checkAnyResourceRunning(ctx context.Context, w *databricks.WorkspaceClient, resources *config.Resources) error {
-	errs, errCtx := errgroup.WithContext(ctx)
-
-	for _, job := range resources.Jobs {
-		id := job.ID
-		if id == "" {
-			continue
-		}
-		errs.Go(func() error {
-			isRunning, err := IsJobRunning(errCtx, w, id)
-			// If there's an error retrieving the job, we assume it's not running
-			if err != nil {
-				return err
-			}
-			if isRunning {
-				return &ErrResourceIsRunning{resourceType: "job", resourceId: id}
-			}
-			return nil
-		})
-	}
-
-	for _, pipeline := range resources.Pipelines {
-		id := pipeline.ID
-		if id == "" {
-			continue
-		}
-		errs.Go(func() error {
-			isRunning, err := IsPipelineRunning(errCtx, w, id)
-			// If there's an error retrieving the pipeline, we assume it's not running
-			if err != nil {
-				return nil
-			}
-			if isRunning {
-				return &ErrResourceIsRunning{resourceType: "pipeline", resourceId: id}
-			}
-			return nil
-		})
-	}
+func checkAnyResourceRunning(ctx context.Context, w *databricks.WorkspaceClient, state *resourcesState) error {
+	if state == nil {
+		return nil
+	}
+
+	errs, errCtx := errgroup.WithContext(ctx)
+
+	for _, resource := range state.Resources {
+		if resource.Mode != tfjson.ManagedResourceMode {
+			continue
+		}
+		for _, instance := range resource.Instances {
+			id := instance.Attributes.ID
+			if id == "" {
+				continue
+			}
+
+			switch resource.Type {
+			case "databricks_job":
+				errs.Go(func() error {
+					isRunning, err := IsJobRunning(errCtx, w, id)
+					// If there's an error retrieving the job, we assume it's not running
+					if err != nil {
+						return err
+					}
+					if isRunning {
+						return &ErrResourceIsRunning{resourceType: "job", resourceId: id}
+					}
+					return nil
+				})
+			case "databricks_pipeline":
+				errs.Go(func() error {
+					isRunning, err := IsPipelineRunning(errCtx, w, id)
+					// If there's an error retrieving the pipeline, we assume it's not running
+					if err != nil {
+						return nil
+					}
+					if isRunning {
+						return &ErrResourceIsRunning{resourceType: "pipeline", resourceId: id}
+					}
+					return nil
+				})
+			}
+		}
+	}
 
 	return errs.Wait()
```
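The mutator above walks `state.Resources` down to `instance.Attributes.ID`, but the `resourcesState` types are defined elsewhere in the package and never appear in this diff. A minimal sketch of what they presumably look like, inferred purely from how this diff and the tests below use them (the field layout and JSON tags are assumptions, not the repository's actual definitions):

```go
package terraform

import tfjson "github.com/hashicorp/terraform-json"

// Sketch only: the real definitions live next to ParseResourcesState.
// resourcesState mirrors the subset of a Terraform state file this check needs.
type resourcesState struct {
	Resources []stateResource `json:"resources"`
}

// stateResource is one "resources" entry in the state file.
type stateResource struct {
	Type      string                  `json:"type"` // e.g. "databricks_job" or "databricks_pipeline"
	Name      string                  `json:"name"`
	Mode      tfjson.ResourceMode     `json:"mode"` // tfjson.ManagedResourceMode == "managed"
	Instances []stateResourceInstance `json:"instances"`
}

type stateResourceInstance struct {
	Attributes stateInstanceAttributes `json:"attributes"`
}

type stateInstanceAttributes struct {
	ID string `json:"id"`
}
```

The tests further down construct these literals directly; `Mode: "managed"` compiles because `tfjson.ResourceMode` is a string type.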
check_running_resources_test.go (moved from package deploy to package terraform):

```diff
@@ -1,12 +1,10 @@
-package deploy
+package terraform
 
 import (
 	"context"
 	"errors"
 	"testing"
 
-	"github.com/databricks/cli/bundle/config"
-	"github.com/databricks/cli/bundle/config/resources"
 	"github.com/databricks/databricks-sdk-go/experimental/mocks"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/databricks/databricks-sdk-go/service/pipelines"
@@ -16,15 +14,22 @@ import (
 
 func TestIsAnyResourceRunningWithEmptyState(t *testing.T) {
 	mock := mocks.NewMockWorkspaceClient(t)
-	err := checkAnyResourceRunning(context.Background(), mock.WorkspaceClient, &config.Resources{})
+	err := checkAnyResourceRunning(context.Background(), mock.WorkspaceClient, &resourcesState{})
 	require.NoError(t, err)
 }
 
 func TestIsAnyResourceRunningWithJob(t *testing.T) {
 	m := mocks.NewMockWorkspaceClient(t)
-	resources := &config.Resources{
-		Jobs: map[string]*resources.Job{
-			"job1": {ID: "123"},
+	resources := &resourcesState{
+		Resources: []stateResource{
+			{
+				Type: "databricks_job",
+				Mode: "managed",
+				Name: "job1",
+				Instances: []stateResourceInstance{
+					{Attributes: stateInstanceAttributes{ID: "123"}},
+				},
+			},
 		},
 	}
 
@@ -50,9 +55,16 @@ func TestIsAnyResourceRunningWithJob(t *testing.T) {
 
 func TestIsAnyResourceRunningWithPipeline(t *testing.T) {
 	m := mocks.NewMockWorkspaceClient(t)
-	resources := &config.Resources{
-		Pipelines: map[string]*resources.Pipeline{
-			"pipeline1": {ID: "123"},
+	resources := &resourcesState{
+		Resources: []stateResource{
+			{
+				Type: "databricks_pipeline",
+				Mode: "managed",
+				Name: "pipeline1",
+				Instances: []stateResourceInstance{
+					{Attributes: stateInstanceAttributes{ID: "123"}},
+				},
+			},
 		},
 	}
 
@@ -79,9 +91,16 @@ func TestIsAnyResourceRunningWithPipeline(t *testing.T) {
 
 func TestIsAnyResourceRunningWithAPIFailure(t *testing.T) {
 	m := mocks.NewMockWorkspaceClient(t)
-	resources := &config.Resources{
-		Pipelines: map[string]*resources.Pipeline{
-			"pipeline1": {ID: "123"},
+	resources := &resourcesState{
+		Resources: []stateResource{
+			{
+				Type: "databricks_pipeline",
+				Mode: "managed",
+				Name: "pipeline1",
+				Instances: []stateResourceInstance{
+					{Attributes: stateInstanceAttributes{ID: "123"}},
+				},
+			},
 		},
 	}
 
```
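Both branches of the switch in the mutator, and the tests above, rely on `IsJobRunning` and `IsPipelineRunning`, which predate this change and are not part of the diff. A plausible sketch of their shape, assuming they wrap the SDK's list-runs and get-pipeline calls (the `strconv`, `jobs`, and `pipelines` imports point this way); the bodies below are illustrative, not the repository's actual code:

```go
package terraform

import (
	"context"
	"strconv"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/jobs"
	"github.com/databricks/databricks-sdk-go/service/pipelines"
)

// Sketch only: inferred from the imports, not copied from the repository.
func IsJobRunning(ctx context.Context, w *databricks.WorkspaceClient, jobId string) (bool, error) {
	// Job IDs are strings in Terraform state but int64 in the Jobs API.
	id, err := strconv.ParseInt(jobId, 10, 64)
	if err != nil {
		return false, err
	}
	// List only active (pending/running) runs for this job.
	runs, err := w.Jobs.ListRunsAll(ctx, jobs.ListRunsRequest{JobId: id, ActiveOnly: true})
	if err != nil {
		return false, err
	}
	return len(runs) > 0, nil
}

func IsPipelineRunning(ctx context.Context, w *databricks.WorkspaceClient, pipelineId string) (bool, error) {
	resp, err := w.Pipelines.Get(ctx, pipelines.GetPipelineRequest{PipelineId: pipelineId})
	if err != nil {
		return false, err
	}
	// Treat any non-idle state as "running".
	return resp.State != pipelines.PipelineStateIdle, nil
}
```

Note the asymmetry the diff preserves: a failed job lookup propagates the error, while a failed pipeline lookup is treated as "not running" — which is exactly what `TestIsAnyResourceRunningWithAPIFailure` exercises.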
deploy.go (deploy phase):

This is the core of the fix: `terraform.Load()` no longer runs before the check, so the remote Terraform state is never merged into the bundle configuration during deployment. The check itself moves into the terraform package, where it reads the state file directly.

```diff
@@ -36,8 +36,7 @@ func Deploy() bundle.Mutator {
 		permissions.ApplyWorkspaceRootPermissions(),
 		terraform.Interpolate(),
 		terraform.Write(),
-		terraform.Load(),
-		deploy.CheckRunningResource(),
+		terraform.CheckRunningResource(),
 		bundle.Defer(
 			terraform.Apply(),
 			bundle.Seq(
```
databricks_template_schema.json (deploy_then_remove_resources test template):

```diff
@@ -3,6 +3,14 @@
 		"unique_id": {
 			"type": "string",
 			"description": "Unique ID for pipeline name"
+		},
+		"spark_version": {
+			"type": "string",
+			"description": "Spark version used for job cluster"
+		},
+		"node_type_id": {
+			"type": "string",
+			"description": "Node type id for job cluster"
 		}
 	}
 }
```
bar.py (new notebook referenced by the test template):

```diff
@@ -0,0 +1,2 @@
+# Databricks notebook source
+print("hello")
```
resources.yml (test template):

```diff
@@ -1,4 +1,15 @@
 resources:
+  jobs:
+    foo:
+      name: test-bundle-job-{{.unique_id}}
+      tasks:
+        - task_key: my_notebook_task
+          new_cluster:
+            num_workers: 1
+            spark_version: "{{.spark_version}}"
+            node_type_id: "{{.node_type_id}}"
+          notebook_task:
+            notebook_path: "./bar.py"
   pipelines:
     bar:
       name: test-bundle-pipeline-{{.unique_id}}
```
deploy_then_remove_resources_test.go (integration test):

```diff
@@ -5,7 +5,9 @@ import (
 	"path/filepath"
 	"testing"
 
+	"github.com/databricks/cli/internal"
 	"github.com/databricks/cli/internal/acc"
+	"github.com/databricks/cli/libs/env"
 	"github.com/google/uuid"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -15,9 +17,12 @@ func TestAccBundleDeployThenRemoveResources(t *testing.T) {
 	ctx, wt := acc.WorkspaceTest(t)
 	w := wt.W
 
+	nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV"))
 	uniqueId := uuid.New().String()
 	bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{
 		"unique_id": uniqueId,
+		"node_type_id": nodeTypeId,
+		"spark_version": defaultSparkVersion,
 	})
 	require.NoError(t, err)
 
@@ -31,6 +36,12 @@ func TestAccBundleDeployThenRemoveResources(t *testing.T) {
 	require.NoError(t, err)
 	assert.Equal(t, pipeline.Name, pipelineName)
 
+	// assert job is created
+	jobName := "test-bundle-job-" + uniqueId
+	job, err := w.Jobs.GetBySettingsName(ctx, jobName)
+	require.NoError(t, err)
+	assert.Equal(t, job.Settings.Name, jobName)
+
 	// delete resources.yml
 	err = os.Remove(filepath.Join(bundleRoot, "resources.yml"))
 	require.NoError(t, err)
@@ -43,6 +54,10 @@ func TestAccBundleDeployThenRemoveResources(t *testing.T) {
 	_, err = w.Pipelines.GetByName(ctx, pipelineName)
 	assert.ErrorContains(t, err, "does not exist")
 
+	// assert job is deleted
+	_, err = w.Jobs.GetBySettingsName(ctx, jobName)
+	assert.ErrorContains(t, err, "does not exist")
+
 	t.Cleanup(func() {
 		err = destroyBundle(t, ctx, bundleRoot)
 		require.NoError(t, err)
```
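`internal.GetNodeTypeId` and `defaultSparkVersion` come from the shared integration-test helpers rather than this diff. A hypothetical sketch of the node-type helper, assuming it simply maps `CLOUD_ENV` to a small per-cloud node type so the same template runs on any test workspace (the specific values are assumptions):

```go
package internal

// Sketch only: picks a small cluster node type for whichever cloud
// the CLOUD_ENV variable indicates; the mapping is an assumption.
func GetNodeTypeId(env string) string {
	if env == "gcp" {
		return "n1-standard-4"
	}
	if env == "azure" {
		return "Standard_DS4_v2"
	}
	// Default to an AWS instance type.
	return "i3.xlarge"
}
```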