mirror of https://github.com/databricks/cli.git
Refactor initTestTemplate/deployBundle/destroyBundle to not return errors (#2017)
## Changes

These test helpers were updated to handle errors internally instead of returning them. Since they receive a `testing.T` object, they can fail the test directly. On the caller side, these functions were always followed by `require.NoError(t, err)`; those checks have now been removed. This reduces setup/teardown boilerplate in the test cases.

## Tests

Existing tests.
parent 70b7bbfd81
commit e5b836a6ac
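The shape of the change, as a minimal standalone sketch (the `writeFile`/`mustWriteFile` helpers below are hypothetical illustrations, not part of the commit): a helper that receives the test handle can call `require.NoError` itself, so every call site sheds its error check.

```go
package example

import (
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"
)

// Before: the helper returns an error that every caller must check.
func writeFile(dir string, data []byte) (string, error) {
	path := filepath.Join(dir, "config.json")
	err := os.WriteFile(path, data, 0o644)
	return path, err
}

// After: the helper takes the test handle and fails the test directly —
// the same pattern initTestTemplate/deployBundle/destroyBundle now use.
func mustWriteFile(t *testing.T, dir string, data []byte) string {
	t.Helper()
	path := filepath.Join(dir, "config.json")
	require.NoError(t, os.WriteFile(path, data, 0o644))
	return path
}

func TestWriteFile(t *testing.T) {
	// Old style: two lines and an err variable per helper call.
	path, err := writeFile(t.TempDir(), []byte("{}"))
	require.NoError(t, err)

	// New style: one line; failure is reported inside the helper.
	path = mustWriteFile(t, t.TempDir(), []byte("{}"))
	_ = path
}
```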
@@ -247,12 +247,11 @@ func TestUploadArtifactFileToVolumeThatDoesNotExist(t *testing.T) {
 		require.NoError(t, err)
 	})
 
-	bundleRoot, err := initTestTemplate(t, ctx, "artifact_path_with_volume", map[string]any{
+	bundleRoot := initTestTemplate(t, ctx, "artifact_path_with_volume", map[string]any{
 		"unique_id":   uuid.New().String(),
 		"schema_name": schemaName,
 		"volume_name": "doesnotexist",
 	})
-	require.NoError(t, err)
 
 	ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot)
 	stdout, stderr, err := testcli.RequireErrorRun(t, ctx, "bundle", "deploy")
@@ -284,12 +283,11 @@ func TestUploadArtifactToVolumeNotYetDeployed(t *testing.T) {
 		require.NoError(t, err)
 	})
 
-	bundleRoot, err := initTestTemplate(t, ctx, "artifact_path_with_volume", map[string]any{
+	bundleRoot := initTestTemplate(t, ctx, "artifact_path_with_volume", map[string]any{
 		"unique_id":   uuid.New().String(),
 		"schema_name": schemaName,
 		"volume_name": "my_volume",
 	})
-	require.NoError(t, err)
 
 	ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot)
 	stdout, stderr, err := testcli.RequireErrorRun(t, ctx, "bundle", "deploy")
@@ -16,27 +16,22 @@ func TestBasicBundleDeployWithFailOnActiveRuns(t *testing.T) {
 
 	nodeTypeId := testutil.GetCloud(t).NodeTypeID()
 	uniqueId := uuid.New().String()
-	root, err := initTestTemplate(t, ctx, "basic", map[string]any{
+	root := initTestTemplate(t, ctx, "basic", map[string]any{
 		"unique_id":     uniqueId,
 		"node_type_id":  nodeTypeId,
 		"spark_version": defaultSparkVersion,
 	})
-	require.NoError(t, err)
 
 	t.Cleanup(func() {
-		err = destroyBundle(t, ctx, root)
-		require.NoError(t, err)
+		destroyBundle(t, ctx, root)
 	})
 
 	// deploy empty bundle
-	err = deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"})
-	require.NoError(t, err)
+	deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"})
 
 	// Remove .databricks directory to simulate a fresh deployment
-	err = os.RemoveAll(filepath.Join(root, ".databricks"))
-	require.NoError(t, err)
+	require.NoError(t, os.RemoveAll(filepath.Join(root, ".databricks")))
 
 	// deploy empty bundle again
-	err = deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"})
-	require.NoError(t, err)
+	deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"})
 }
@@ -23,30 +23,27 @@ func TestBindJobToExistingJob(t *testing.T) {
 
 	nodeTypeId := testutil.GetCloud(t).NodeTypeID()
 	uniqueId := uuid.New().String()
-	bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{
+	bundleRoot := initTestTemplate(t, ctx, "basic", map[string]any{
 		"unique_id":     uniqueId,
 		"spark_version": "13.3.x-scala2.12",
 		"node_type_id":  nodeTypeId,
 	})
-	require.NoError(t, err)
 
 	jobId := gt.createTestJob(ctx)
 	t.Cleanup(func() {
 		gt.destroyJob(ctx, jobId)
-		require.NoError(t, err)
 	})
 
 	ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot)
 	c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId), "--auto-approve")
-	_, _, err = c.Run()
+	_, _, err := c.Run()
 	require.NoError(t, err)
 
 	// Remove .databricks directory to simulate a fresh deployment
 	err = os.RemoveAll(filepath.Join(bundleRoot, ".databricks"))
 	require.NoError(t, err)
 
-	err = deployBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	deployBundle(t, ctx, bundleRoot)
 
 	w, err := databricks.NewWorkspaceClient()
 	require.NoError(t, err)
@@ -67,8 +64,7 @@ func TestBindJobToExistingJob(t *testing.T) {
 	err = os.RemoveAll(filepath.Join(bundleRoot, ".databricks"))
 	require.NoError(t, err)
 
-	err = destroyBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	destroyBundle(t, ctx, bundleRoot)
 
 	// Check that job is unbound and exists after bundle is destroyed
 	job, err = w.Jobs.Get(ctx, jobs.GetJobRequest{
@@ -85,18 +81,16 @@ func TestAbortBind(t *testing.T) {
 
 	nodeTypeId := testutil.GetCloud(t).NodeTypeID()
 	uniqueId := uuid.New().String()
-	bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{
+	bundleRoot := initTestTemplate(t, ctx, "basic", map[string]any{
 		"unique_id":     uniqueId,
 		"spark_version": "13.3.x-scala2.12",
 		"node_type_id":  nodeTypeId,
 	})
-	require.NoError(t, err)
 
 	jobId := gt.createTestJob(ctx)
 	t.Cleanup(func() {
 		gt.destroyJob(ctx, jobId)
-		err := destroyBundle(t, ctx, bundleRoot)
-		require.NoError(t, err)
+		destroyBundle(t, ctx, bundleRoot)
 	})
 
 	// Bind should fail because prompting is not possible.
@@ -105,12 +99,11 @@ func TestAbortBind(t *testing.T) {
 	c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId))
 
 	// Expect error suggesting to use --auto-approve
-	_, _, err = c.Run()
+	_, _, err := c.Run()
 	assert.ErrorContains(t, err, "failed to bind the resource")
 	assert.ErrorContains(t, err, "This bind operation requires user confirmation, but the current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed")
 
-	err = deployBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	deployBundle(t, ctx, bundleRoot)
 
 	w, err := databricks.NewWorkspaceClient()
 	require.NoError(t, err)
@@ -130,10 +123,9 @@ func TestGenerateAndBind(t *testing.T) {
 	gt := &generateJobTest{T: wt, w: wt.W}
 
 	uniqueId := uuid.New().String()
-	bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{
+	bundleRoot := initTestTemplate(t, ctx, "with_includes", map[string]any{
 		"unique_id": uniqueId,
 	})
-	require.NoError(t, err)
 
 	w, err := databricks.NewWorkspaceClient()
 	require.NoError(t, err)
@@ -169,11 +161,9 @@ func TestGenerateAndBind(t *testing.T) {
 	_, _, err = c.Run()
 	require.NoError(t, err)
 
-	err = deployBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	deployBundle(t, ctx, bundleRoot)
 
-	err = destroyBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	destroyBundle(t, ctx, bundleRoot)
 
 	// Check that job is bound and does not extsts after bundle is destroyed
 	_, err = w.Jobs.Get(ctx, jobs.GetJobRequest{
@@ -20,16 +20,14 @@ func TestDeployBundleWithCluster(t *testing.T) {
 
 	nodeTypeId := testutil.GetCloud(t).NodeTypeID()
 	uniqueId := uuid.New().String()
-	root, err := initTestTemplate(t, ctx, "clusters", map[string]any{
+	root := initTestTemplate(t, ctx, "clusters", map[string]any{
 		"unique_id":     uniqueId,
 		"node_type_id":  nodeTypeId,
 		"spark_version": defaultSparkVersion,
 	})
-	require.NoError(t, err)
 
 	t.Cleanup(func() {
-		err = destroyBundle(t, ctx, root)
-		require.NoError(t, err)
+		destroyBundle(t, ctx, root)
 
 		cluster, err := wt.W.Clusters.GetByClusterName(ctx, fmt.Sprintf("test-cluster-%s", uniqueId))
 		if err != nil {
@@ -39,8 +37,7 @@ func TestDeployBundleWithCluster(t *testing.T) {
 		}
 	})
 
-	err = deployBundle(t, ctx, root)
-	require.NoError(t, err)
+	deployBundle(t, ctx, root)
 
 	// Cluster should exists after bundle deployment
 	cluster, err := wt.W.Clusters.GetByClusterName(ctx, fmt.Sprintf("test-cluster-%s", uniqueId))
@@ -18,19 +18,16 @@ func TestDashboards(t *testing.T) {
 
 	warehouseID := testutil.GetEnvOrSkipTest(t, "TEST_DEFAULT_WAREHOUSE_ID")
 	uniqueID := uuid.New().String()
-	root, err := initTestTemplate(t, ctx, "dashboards", map[string]any{
+	root := initTestTemplate(t, ctx, "dashboards", map[string]any{
 		"unique_id":    uniqueID,
 		"warehouse_id": warehouseID,
 	})
-	require.NoError(t, err)
 
 	t.Cleanup(func() {
-		err = destroyBundle(t, ctx, root)
-		require.NoError(t, err)
+		destroyBundle(t, ctx, root)
 	})
 
-	err = deployBundle(t, ctx, root)
-	require.NoError(t, err)
+	deployBundle(t, ctx, root)
 
 	// Load bundle configuration by running the validate command.
 	b := unmarshalConfig(t, mustValidateBundle(t, ctx, root))
@@ -55,12 +52,11 @@ func TestDashboards(t *testing.T) {
 	require.NoError(t, err)
 
 	// Try to redeploy the bundle and confirm that the out of band modification is detected.
-	stdout, _, err := deployBundleWithArgs(t, ctx, root)
+	stdout, _, err := deployBundleWithArgsErr(t, ctx, root)
 	require.Error(t, err)
 	assert.Contains(t, stdout, `Error: dashboard "file_reference" has been modified remotely`+"\n")
 
 	// Redeploy the bundle with the --force flag and confirm that the out of band modification is ignored.
-	_, stderr, err := deployBundleWithArgs(t, ctx, root, "--force")
-	require.NoError(t, err)
+	_, stderr := deployBundleWithArgs(t, ctx, root, "--force")
 	assert.Contains(t, stderr, `Deployment complete!`+"\n")
 }
@@ -25,17 +25,14 @@ import (
 )
 
 func setupUcSchemaBundle(t *testing.T, ctx context.Context, w *databricks.WorkspaceClient, uniqueId string) string {
-	bundleRoot, err := initTestTemplate(t, ctx, "uc_schema", map[string]any{
+	bundleRoot := initTestTemplate(t, ctx, "uc_schema", map[string]any{
 		"unique_id": uniqueId,
 	})
-	require.NoError(t, err)
 
-	err = deployBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	deployBundle(t, ctx, bundleRoot)
 
 	t.Cleanup(func() {
-		err := destroyBundle(t, ctx, bundleRoot)
-		require.NoError(t, err)
+		destroyBundle(t, ctx, bundleRoot)
 	})
 
 	// Assert the schema is created
@@ -97,8 +94,7 @@ func TestBundleDeployUcSchema(t *testing.T) {
 	require.NoError(t, err)
 
 	// Redeploy the bundle
-	err = deployBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	deployBundle(t, ctx, bundleRoot)
 
 	// Assert the schema is deleted
 	_, err = w.Schemas.GetByFullName(ctx, strings.Join([]string{catalogName, schemaName}, "."))
@@ -135,16 +131,14 @@ func TestBundlePipelineDeleteWithoutAutoApprove(t *testing.T) {
 
 	nodeTypeId := testutil.GetCloud(t).NodeTypeID()
 	uniqueId := uuid.New().String()
-	bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{
+	bundleRoot := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{
 		"unique_id":     uniqueId,
 		"node_type_id":  nodeTypeId,
 		"spark_version": defaultSparkVersion,
 	})
-	require.NoError(t, err)
 
 	// deploy pipeline
-	err = deployBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	deployBundle(t, ctx, bundleRoot)
 
 	// assert pipeline is created
 	pipelineName := "test-bundle-pipeline-" + uniqueId
@@ -182,17 +176,14 @@ func TestBundlePipelineRecreateWithoutAutoApprove(t *testing.T) {
 	w := wt.W
 	uniqueId := uuid.New().String()
 
-	bundleRoot, err := initTestTemplate(t, ctx, "recreate_pipeline", map[string]any{
+	bundleRoot := initTestTemplate(t, ctx, "recreate_pipeline", map[string]any{
 		"unique_id": uniqueId,
 	})
-	require.NoError(t, err)
 
-	err = deployBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	deployBundle(t, ctx, bundleRoot)
 
 	t.Cleanup(func() {
-		err := destroyBundle(t, ctx, bundleRoot)
-		require.NoError(t, err)
+		destroyBundle(t, ctx, bundleRoot)
 	})
 
 	// Assert the pipeline is created
@@ -221,16 +212,14 @@ func TestDeployBasicBundleLogs(t *testing.T) {
 
 	nodeTypeId := testutil.GetCloud(t).NodeTypeID()
 	uniqueId := uuid.New().String()
-	root, err := initTestTemplate(t, ctx, "basic", map[string]any{
+	root := initTestTemplate(t, ctx, "basic", map[string]any{
 		"unique_id":     uniqueId,
 		"node_type_id":  nodeTypeId,
 		"spark_version": defaultSparkVersion,
 	})
-	require.NoError(t, err)
 
 	t.Cleanup(func() {
-		err = destroyBundle(t, ctx, root)
-		require.NoError(t, err)
+		destroyBundle(t, ctx, root)
 	})
 
 	currentUser, err := wt.W.CurrentUser.Me(ctx)
@@ -251,17 +240,14 @@ func TestDeployUcVolume(t *testing.T) {
 	w := wt.W
 
 	uniqueId := uuid.New().String()
-	bundleRoot, err := initTestTemplate(t, ctx, "volume", map[string]any{
+	bundleRoot := initTestTemplate(t, ctx, "volume", map[string]any{
 		"unique_id": uniqueId,
 	})
-	require.NoError(t, err)
 
-	err = deployBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	deployBundle(t, ctx, bundleRoot)
 
 	t.Cleanup(func() {
-		err := destroyBundle(t, ctx, bundleRoot)
-		require.NoError(t, err)
+		destroyBundle(t, ctx, bundleRoot)
 	})
 
 	// Assert the volume is created successfully
@@ -18,16 +18,14 @@ func TestBundleDeployThenRemoveResources(t *testing.T) {
 
 	nodeTypeId := testutil.GetCloud(t).NodeTypeID()
 	uniqueId := uuid.New().String()
-	bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{
+	bundleRoot := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{
 		"unique_id":     uniqueId,
 		"node_type_id":  nodeTypeId,
 		"spark_version": defaultSparkVersion,
 	})
-	require.NoError(t, err)
 
 	// deploy pipeline
-	err = deployBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	deployBundle(t, ctx, bundleRoot)
 
 	// assert pipeline is created
 	pipelineName := "test-bundle-pipeline-" + uniqueId
@@ -46,8 +44,7 @@ func TestBundleDeployThenRemoveResources(t *testing.T) {
 	require.NoError(t, err)
 
 	// deploy again
-	err = deployBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	deployBundle(t, ctx, bundleRoot)
 
 	// assert pipeline is deleted
 	_, err = w.Pipelines.GetByName(ctx, pipelineName)
@@ -58,7 +55,6 @@ func TestBundleDeployThenRemoveResources(t *testing.T) {
 	assert.ErrorContains(t, err, "does not exist")
 
 	t.Cleanup(func() {
-		err = destroyBundle(t, ctx, bundleRoot)
-		require.NoError(t, err)
+		destroyBundle(t, ctx, bundleRoot)
 	})
 }
@@ -19,19 +19,16 @@ func TestDeployBasicToSharedWorkspacePath(t *testing.T) {
 	currentUser, err := wt.W.CurrentUser.Me(ctx)
 	require.NoError(t, err)
 
-	bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{
+	bundleRoot := initTestTemplate(t, ctx, "basic", map[string]any{
 		"unique_id":     uniqueId,
 		"node_type_id":  nodeTypeId,
 		"spark_version": defaultSparkVersion,
 		"root_path":     fmt.Sprintf("/Shared/%s", currentUser.UserName),
 	})
-	require.NoError(t, err)
 
 	t.Cleanup(func() {
-		err = destroyBundle(wt, ctx, bundleRoot)
-		require.NoError(wt, err)
+		destroyBundle(wt, ctx, bundleRoot)
 	})
 
-	err = deployBundle(wt, ctx, bundleRoot)
-	require.NoError(wt, err)
+	deployBundle(wt, ctx, bundleRoot)
 }
@@ -20,17 +20,16 @@ func TestFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) {
 
 	nodeTypeId := testutil.GetCloud(t).NodeTypeID()
 	uniqueId := uuid.New().String()
-	bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{
+	bundleRoot := initTestTemplate(t, ctx, "basic", map[string]any{
 		"unique_id":     uniqueId,
 		"spark_version": "13.3.x-scala2.12",
 		"node_type_id":  nodeTypeId,
 	})
-	require.NoError(t, err)
 
 	ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot)
 
 	// Add some test file to the bundle
-	err = os.WriteFile(filepath.Join(bundleRoot, "test.py"), []byte("print('Hello, World!')"), 0o644)
+	err := os.WriteFile(filepath.Join(bundleRoot, "test.py"), []byte("print('Hello, World!')"), 0o644)
 	require.NoError(t, err)
 
 	err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), []byte("print('Hello, World!')"), 0o644)
@@ -40,11 +39,10 @@ func TestFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) {
 	err = os.WriteFile(filepath.Join(bundleRoot, "notebook.py"), []byte("# Databricks notebook source\nHello, World!"), 0o644)
 	require.NoError(t, err)
 
-	err = deployBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	deployBundle(t, ctx, bundleRoot)
 
 	t.Cleanup(func() {
-		require.NoError(t, destroyBundle(t, ctx, bundleRoot))
+		destroyBundle(t, ctx, bundleRoot)
 	})
 
 	remoteRoot := getBundleRemoteRootPath(w, t, uniqueId)
@@ -80,8 +78,7 @@ func TestFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) {
 	err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), []byte("print('Modified!')"), 0o644)
 	require.NoError(t, err)
 
-	err = deployBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	deployBundle(t, ctx, bundleRoot)
 
 	// Check that removed file is not in workspace anymore
 	_, err = w.Workspace.GetStatusByPath(ctx, path.Join(remoteRoot, "files", "test.py"))
@@ -20,22 +20,20 @@ func TestBundleDestroy(t *testing.T) {
 
 	nodeTypeId := testutil.GetCloud(t).NodeTypeID()
 	uniqueId := uuid.New().String()
-	bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{
+	bundleRoot := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{
 		"unique_id":     uniqueId,
 		"node_type_id":  nodeTypeId,
 		"spark_version": defaultSparkVersion,
 	})
-	require.NoError(t, err)
 
 	snapshotsDir := filepath.Join(bundleRoot, ".databricks", "bundle", "default", "sync-snapshots")
 
 	// Assert the snapshot file does not exist
-	_, err = os.ReadDir(snapshotsDir)
+	_, err := os.ReadDir(snapshotsDir)
 	assert.ErrorIs(t, err, os.ErrNotExist)
 
 	// deploy resources
-	err = deployBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	deployBundle(t, ctx, bundleRoot)
 
 	// Assert the snapshot file exists
 	entries, err := os.ReadDir(snapshotsDir)
@@ -60,8 +58,7 @@ func TestBundleDestroy(t *testing.T) {
 	assert.Equal(t, job.Settings.Name, jobName)
 
 	// destroy bundle
-	err = destroyBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	destroyBundle(t, ctx, bundleRoot)
 
 	// assert pipeline is deleted
 	_, err = w.Pipelines.GetByName(ctx, pipelineName)
@@ -26,11 +26,9 @@ func TestEmptyBundleDeploy(t *testing.T) {
 	f.Close()
 
 	// deploy empty bundle
-	err = deployBundle(t, ctx, tmpDir)
-	require.NoError(t, err)
+	deployBundle(t, ctx, tmpDir)
 
 	t.Cleanup(func() {
-		err = destroyBundle(t, ctx, tmpDir)
-		require.NoError(t, err)
+		destroyBundle(t, ctx, tmpDir)
 	})
 }
@@ -13,17 +13,14 @@ func TestPythonWheelTaskWithEnvironmentsDeployAndRun(t *testing.T) {
 
 	ctx, _ := acc.WorkspaceTest(t)
 
-	bundleRoot, err := initTestTemplate(t, ctx, "python_wheel_task_with_environments", map[string]any{
+	bundleRoot := initTestTemplate(t, ctx, "python_wheel_task_with_environments", map[string]any{
 		"unique_id": uuid.New().String(),
 	})
-	require.NoError(t, err)
 
-	err = deployBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	deployBundle(t, ctx, bundleRoot)
 
 	t.Cleanup(func() {
-		err := destroyBundle(t, ctx, bundleRoot)
-		require.NoError(t, err)
+		destroyBundle(t, ctx, bundleRoot)
 	})
 
 	out, err := runResource(t, ctx, bundleRoot, "some_other_job")
@@ -26,10 +26,9 @@ func TestGenerateFromExistingJobAndDeploy(t *testing.T) {
 	gt := &generateJobTest{T: wt, w: wt.W}
 
 	uniqueId := uuid.New().String()
-	bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{
+	bundleRoot := initTestTemplate(t, ctx, "with_includes", map[string]any{
 		"unique_id": uniqueId,
 	})
-	require.NoError(t, err)
 
 	jobId := gt.createTestJob(ctx)
 	t.Cleanup(func() {
@@ -41,7 +40,7 @@ func TestGenerateFromExistingJobAndDeploy(t *testing.T) {
 		"--existing-job-id", fmt.Sprint(jobId),
 		"--config-dir", filepath.Join(bundleRoot, "resources"),
 		"--source-dir", filepath.Join(bundleRoot, "src"))
-	_, _, err = c.Run()
+	_, _, err := c.Run()
 	require.NoError(t, err)
 
 	_, err = os.Stat(filepath.Join(bundleRoot, "src", "test.py"))
@@ -62,11 +61,9 @@ func TestGenerateFromExistingJobAndDeploy(t *testing.T) {
 	require.Contains(t, generatedYaml, "spark_version: 13.3.x-scala2.12")
 	require.Contains(t, generatedYaml, "num_workers: 1")
 
-	err = deployBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	deployBundle(t, ctx, bundleRoot)
 
-	err = destroyBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	destroyBundle(t, ctx, bundleRoot)
 }
 
 type generateJobTest struct {
@@ -25,10 +25,9 @@ func TestGenerateFromExistingPipelineAndDeploy(t *testing.T) {
 	gt := &generatePipelineTest{T: wt, w: wt.W}
 
 	uniqueId := uuid.New().String()
-	bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{
+	bundleRoot := initTestTemplate(t, ctx, "with_includes", map[string]any{
 		"unique_id": uniqueId,
 	})
-	require.NoError(t, err)
 
 	pipelineId, name := gt.createTestPipeline(ctx)
 	t.Cleanup(func() {
@@ -40,7 +39,7 @@ func TestGenerateFromExistingPipelineAndDeploy(t *testing.T) {
 		"--existing-pipeline-id", fmt.Sprint(pipelineId),
 		"--config-dir", filepath.Join(bundleRoot, "resources"),
 		"--source-dir", filepath.Join(bundleRoot, "src"))
-	_, _, err = c.Run()
+	_, _, err := c.Run()
 	require.NoError(t, err)
 
 	_, err = os.Stat(filepath.Join(bundleRoot, "src", "notebook.py"))
@@ -70,11 +69,9 @@ func TestGenerateFromExistingPipelineAndDeploy(t *testing.T) {
 	require.Contains(t, generatedYaml, "- file:")
 	require.Contains(t, generatedYaml, fmt.Sprintf("path: %s", filepath.Join("..", "src", "test.py")))
 
-	err = deployBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	deployBundle(t, ctx, bundleRoot)
 
-	err = destroyBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	destroyBundle(t, ctx, bundleRoot)
 }
 
 type generatePipelineTest struct {
@@ -26,18 +26,15 @@ import (
 
 const defaultSparkVersion = "13.3.x-snapshot-scala2.12"
 
-func initTestTemplate(t testutil.TestingT, ctx context.Context, templateName string, config map[string]any) (string, error) {
+func initTestTemplate(t testutil.TestingT, ctx context.Context, templateName string, config map[string]any) string {
 	bundleRoot := t.TempDir()
 	return initTestTemplateWithBundleRoot(t, ctx, templateName, config, bundleRoot)
 }
 
-func initTestTemplateWithBundleRoot(t testutil.TestingT, ctx context.Context, templateName string, config map[string]any, bundleRoot string) (string, error) {
+func initTestTemplateWithBundleRoot(t testutil.TestingT, ctx context.Context, templateName string, config map[string]any, bundleRoot string) string {
 	templateRoot := filepath.Join("bundles", templateName)
 
-	configFilePath, err := writeConfigFile(t, config)
-	if err != nil {
-		return "", err
-	}
+	configFilePath := writeConfigFile(t, config)
 
 	ctx = root.SetWorkspaceClient(ctx, nil)
 	cmd := cmdio.NewIO(ctx, flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "bundles")
@@ -46,21 +43,21 @@ func initTestTemplateWithBundleRoot(t testutil.TestingT, ctx context.Context, te
 	out, err := filer.NewLocalClient(bundleRoot)
 	require.NoError(t, err)
 	err = template.Materialize(ctx, configFilePath, os.DirFS(templateRoot), out)
-	return bundleRoot, err
+	require.NoError(t, err)
+	return bundleRoot
 }
 
-func writeConfigFile(t testutil.TestingT, config map[string]any) (string, error) {
+func writeConfigFile(t testutil.TestingT, config map[string]any) string {
 	bytes, err := json.Marshal(config)
-	if err != nil {
-		return "", err
-	}
+	require.NoError(t, err)
 
 	dir := t.TempDir()
 	filepath := filepath.Join(dir, "config.json")
 	t.Log("Configuration for template: ", string(bytes))
 
 	err = os.WriteFile(filepath, bytes, 0o644)
-	return filepath, err
+	require.NoError(t, err)
+	return filepath
 }
 
 func validateBundle(t testutil.TestingT, ctx context.Context, path string) ([]byte, error) {
@@ -83,14 +80,14 @@ func unmarshalConfig(t testutil.TestingT, data []byte) *bundle.Bundle {
 	return bundle
 }
 
-func deployBundle(t testutil.TestingT, ctx context.Context, path string) error {
+func deployBundle(t testutil.TestingT, ctx context.Context, path string) {
 	ctx = env.Set(ctx, "BUNDLE_ROOT", path)
 	c := testcli.NewRunner(t, ctx, "bundle", "deploy", "--force-lock", "--auto-approve")
 	_, _, err := c.Run()
-	return err
+	require.NoError(t, err)
 }
 
-func deployBundleWithArgs(t testutil.TestingT, ctx context.Context, path string, args ...string) (string, string, error) {
+func deployBundleWithArgsErr(t testutil.TestingT, ctx context.Context, path string, args ...string) (string, string, error) {
 	ctx = env.Set(ctx, "BUNDLE_ROOT", path)
 	args = append([]string{"bundle", "deploy"}, args...)
 	c := testcli.NewRunner(t, ctx, args...)
@@ -98,13 +95,19 @@ func deployBundleWithArgs(t testutil.TestingT, ctx context.Context, path string,
 	return stdout.String(), stderr.String(), err
 }
 
-func deployBundleWithFlags(t testutil.TestingT, ctx context.Context, path string, flags []string) error {
+func deployBundleWithArgs(t testutil.TestingT, ctx context.Context, path string, args ...string) (string, string) {
+	stdout, stderr, err := deployBundleWithArgsErr(t, ctx, path, args...)
+	require.NoError(t, err)
+	return stdout, stderr
+}
+
+func deployBundleWithFlags(t testutil.TestingT, ctx context.Context, path string, flags []string) {
 	ctx = env.Set(ctx, "BUNDLE_ROOT", path)
 	args := []string{"bundle", "deploy", "--force-lock"}
 	args = append(args, flags...)
 	c := testcli.NewRunner(t, ctx, args...)
 	_, _, err := c.Run()
-	return err
+	require.NoError(t, err)
 }
 
 func runResource(t testutil.TestingT, ctx context.Context, path, key string) (string, error) {
@@ -128,11 +131,11 @@ func runResourceWithParams(t testutil.TestingT, ctx context.Context, path, key s
 	return stdout.String(), err
 }
 
-func destroyBundle(t testutil.TestingT, ctx context.Context, path string) error {
+func destroyBundle(t testutil.TestingT, ctx context.Context, path string) {
 	ctx = env.Set(ctx, "BUNDLE_ROOT", path)
 	c := testcli.NewRunner(t, ctx, "bundle", "destroy", "--auto-approve")
 	_, _, err := c.Run()
-	return err
+	require.NoError(t, err)
 }
 
 func getBundleRemoteRootPath(w *databricks.WorkspaceClient, t testutil.TestingT, uniqueId string) string {
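With the helpers above in place, call sites reduce to the pattern below. This is an illustrative sketch only — `TestSketch` is hypothetical, though every helper and identifier it uses appears in the diff — assuming it lives in the same test package as the helpers. `deployBundleWithArgsErr` remains the escape hatch for tests that assert on a failed deploy, as the dashboards test above does.

```go
// Hypothetical test showing the two calling styles after this refactor.
func TestSketch(t *testing.T) {
	ctx, _ := acc.WorkspaceTest(t)

	// Happy path: no err plumbing; helpers fail the test internally.
	root := initTestTemplate(t, ctx, "basic", map[string]any{
		"unique_id":     uuid.New().String(),
		"node_type_id":  testutil.GetCloud(t).NodeTypeID(),
		"spark_version": defaultSparkVersion,
	})
	t.Cleanup(func() {
		destroyBundle(t, ctx, root)
	})
	deployBundle(t, ctx, root)

	// Expected-failure path: the Err variant still returns the error
	// so the test can assert on it, e.g. with require.Error(t, err).
	stdout, _, err := deployBundleWithArgsErr(t, ctx, root)
	_, _ = stdout, err
}
```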
@@ -24,21 +24,18 @@ func TestJobsMetadataFile(t *testing.T) {
 
 	nodeTypeId := testutil.GetCloud(t).NodeTypeID()
 	uniqueId := uuid.New().String()
-	bundleRoot, err := initTestTemplate(t, ctx, "job_metadata", map[string]any{
+	bundleRoot := initTestTemplate(t, ctx, "job_metadata", map[string]any{
 		"unique_id":     uniqueId,
 		"node_type_id":  nodeTypeId,
 		"spark_version": defaultSparkVersion,
 	})
-	require.NoError(t, err)
 
 	// deploy bundle
-	err = deployBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	deployBundle(t, ctx, bundleRoot)
 
 	// Cleanup the deployed bundle
 	t.Cleanup(func() {
-		err = destroyBundle(t, ctx, bundleRoot)
-		require.NoError(t, err)
+		destroyBundle(t, ctx, bundleRoot)
 	})
 
 	// assert job 1 is created
@@ -27,16 +27,14 @@ func TestLocalStateStaleness(t *testing.T) {
 	nodeTypeId := testutil.GetCloud(t).NodeTypeID()
 	uniqueId := uuid.New().String()
 	initialize := func() string {
-		root, err := initTestTemplate(t, ctx, "basic", map[string]any{
+		root := initTestTemplate(t, ctx, "basic", map[string]any{
 			"unique_id":     uniqueId,
 			"node_type_id":  nodeTypeId,
 			"spark_version": defaultSparkVersion,
 		})
-		require.NoError(t, err)
 
 		t.Cleanup(func() {
-			err = destroyBundle(t, ctx, root)
-			require.NoError(t, err)
+			destroyBundle(t, ctx, root)
 		})
 
 		return root
@@ -48,16 +46,13 @@ func TestLocalStateStaleness(t *testing.T) {
 	bundleB := initialize()
 
 	// 1) Deploy bundle A
-	err = deployBundle(t, ctx, bundleA)
-	require.NoError(t, err)
+	deployBundle(t, ctx, bundleA)
 
 	// 2) Deploy bundle B
-	err = deployBundle(t, ctx, bundleB)
-	require.NoError(t, err)
+	deployBundle(t, ctx, bundleB)
 
 	// 3) Deploy bundle A again
-	err = deployBundle(t, ctx, bundleA)
-	require.NoError(t, err)
+	deployBundle(t, ctx, bundleA)
 
 	// Assert that there is only a single job in the workspace corresponding to this bundle.
 	iter := w.Jobs.List(context.Background(), jobs.ListJobsRequest{
@@ -15,21 +15,18 @@ func runPythonWheelTest(t *testing.T, templateName, sparkVersion string, pythonW
 
 	nodeTypeId := testutil.GetCloud(t).NodeTypeID()
 	instancePoolId := env.Get(ctx, "TEST_INSTANCE_POOL_ID")
-	bundleRoot, err := initTestTemplate(t, ctx, templateName, map[string]any{
+	bundleRoot := initTestTemplate(t, ctx, templateName, map[string]any{
 		"node_type_id":         nodeTypeId,
 		"unique_id":            uuid.New().String(),
 		"spark_version":        sparkVersion,
 		"python_wheel_wrapper": pythonWheelWrapper,
 		"instance_pool_id":     instancePoolId,
 	})
-	require.NoError(t, err)
 
-	err = deployBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	deployBundle(t, ctx, bundleRoot)
 
 	t.Cleanup(func() {
-		err := destroyBundle(t, ctx, bundleRoot)
-		require.NoError(t, err)
+		destroyBundle(t, ctx, bundleRoot)
 	})
 
 	out, err := runResource(t, ctx, bundleRoot, "some_other_job")
@@ -15,7 +15,7 @@ func runSparkJarTestCommon(t *testing.T, ctx context.Context, sparkVersion, arti
 	nodeTypeId := testutil.GetCloud(t).NodeTypeID()
 	tmpDir := t.TempDir()
 	instancePoolId := env.Get(ctx, "TEST_INSTANCE_POOL_ID")
-	bundleRoot, err := initTestTemplateWithBundleRoot(t, ctx, "spark_jar_task", map[string]any{
+	bundleRoot := initTestTemplateWithBundleRoot(t, ctx, "spark_jar_task", map[string]any{
 		"node_type_id":  nodeTypeId,
 		"unique_id":     uuid.New().String(),
 		"spark_version": sparkVersion,
@@ -23,14 +23,11 @@ func runSparkJarTestCommon(t *testing.T, ctx context.Context, sparkVersion, arti
 		"artifact_path":    artifactPath,
 		"instance_pool_id": instancePoolId,
 	}, tmpDir)
-	require.NoError(t, err)
 
-	err = deployBundle(t, ctx, bundleRoot)
-	require.NoError(t, err)
+	deployBundle(t, ctx, bundleRoot)
 
 	t.Cleanup(func() {
-		err := destroyBundle(t, ctx, bundleRoot)
-		require.NoError(t, err)
+		destroyBundle(t, ctx, bundleRoot)
 	})
 
 	out, err := runResource(t, ctx, bundleRoot, "jar_job")