mirror of https://github.com/databricks/cli.git
Refactor initTestTemplate/deployBundle/destroyBundle to not return errors (#2017)
## Changes These test helpers were updated to handle the error internally and not return it. Since they have a testing.T object, they can do so directly. On the caller side, these functions were always followed by require.NoError(t, err), which was cleaned up. This approach helps reduce the setup/teardown boilerplate in the test cases. ## Tests Existing tests.
This commit is contained in:
parent
70b7bbfd81
commit
e5b836a6ac
|
@ -247,12 +247,11 @@ func TestUploadArtifactFileToVolumeThatDoesNotExist(t *testing.T) {
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
})
|
})
|
||||||
|
|
||||||
bundleRoot, err := initTestTemplate(t, ctx, "artifact_path_with_volume", map[string]any{
|
bundleRoot := initTestTemplate(t, ctx, "artifact_path_with_volume", map[string]any{
|
||||||
"unique_id": uuid.New().String(),
|
"unique_id": uuid.New().String(),
|
||||||
"schema_name": schemaName,
|
"schema_name": schemaName,
|
||||||
"volume_name": "doesnotexist",
|
"volume_name": "doesnotexist",
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot)
|
ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot)
|
||||||
stdout, stderr, err := testcli.RequireErrorRun(t, ctx, "bundle", "deploy")
|
stdout, stderr, err := testcli.RequireErrorRun(t, ctx, "bundle", "deploy")
|
||||||
|
@ -284,12 +283,11 @@ func TestUploadArtifactToVolumeNotYetDeployed(t *testing.T) {
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
})
|
})
|
||||||
|
|
||||||
bundleRoot, err := initTestTemplate(t, ctx, "artifact_path_with_volume", map[string]any{
|
bundleRoot := initTestTemplate(t, ctx, "artifact_path_with_volume", map[string]any{
|
||||||
"unique_id": uuid.New().String(),
|
"unique_id": uuid.New().String(),
|
||||||
"schema_name": schemaName,
|
"schema_name": schemaName,
|
||||||
"volume_name": "my_volume",
|
"volume_name": "my_volume",
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot)
|
ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot)
|
||||||
stdout, stderr, err := testcli.RequireErrorRun(t, ctx, "bundle", "deploy")
|
stdout, stderr, err := testcli.RequireErrorRun(t, ctx, "bundle", "deploy")
|
||||||
|
|
|
@ -16,27 +16,22 @@ func TestBasicBundleDeployWithFailOnActiveRuns(t *testing.T) {
|
||||||
|
|
||||||
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
||||||
uniqueId := uuid.New().String()
|
uniqueId := uuid.New().String()
|
||||||
root, err := initTestTemplate(t, ctx, "basic", map[string]any{
|
root := initTestTemplate(t, ctx, "basic", map[string]any{
|
||||||
"unique_id": uniqueId,
|
"unique_id": uniqueId,
|
||||||
"node_type_id": nodeTypeId,
|
"node_type_id": nodeTypeId,
|
||||||
"spark_version": defaultSparkVersion,
|
"spark_version": defaultSparkVersion,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
err = destroyBundle(t, ctx, root)
|
destroyBundle(t, ctx, root)
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
})
|
||||||
|
|
||||||
// deploy empty bundle
|
// deploy empty bundle
|
||||||
err = deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"})
|
deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Remove .databricks directory to simulate a fresh deployment
|
// Remove .databricks directory to simulate a fresh deployment
|
||||||
err = os.RemoveAll(filepath.Join(root, ".databricks"))
|
require.NoError(t, os.RemoveAll(filepath.Join(root, ".databricks")))
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// deploy empty bundle again
|
// deploy empty bundle again
|
||||||
err = deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"})
|
deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"})
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -23,30 +23,27 @@ func TestBindJobToExistingJob(t *testing.T) {
|
||||||
|
|
||||||
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
||||||
uniqueId := uuid.New().String()
|
uniqueId := uuid.New().String()
|
||||||
bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{
|
bundleRoot := initTestTemplate(t, ctx, "basic", map[string]any{
|
||||||
"unique_id": uniqueId,
|
"unique_id": uniqueId,
|
||||||
"spark_version": "13.3.x-scala2.12",
|
"spark_version": "13.3.x-scala2.12",
|
||||||
"node_type_id": nodeTypeId,
|
"node_type_id": nodeTypeId,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
jobId := gt.createTestJob(ctx)
|
jobId := gt.createTestJob(ctx)
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
gt.destroyJob(ctx, jobId)
|
gt.destroyJob(ctx, jobId)
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
})
|
||||||
|
|
||||||
ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot)
|
ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot)
|
||||||
c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId), "--auto-approve")
|
c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId), "--auto-approve")
|
||||||
_, _, err = c.Run()
|
_, _, err := c.Run()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Remove .databricks directory to simulate a fresh deployment
|
// Remove .databricks directory to simulate a fresh deployment
|
||||||
err = os.RemoveAll(filepath.Join(bundleRoot, ".databricks"))
|
err = os.RemoveAll(filepath.Join(bundleRoot, ".databricks"))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
err = deployBundle(t, ctx, bundleRoot)
|
deployBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
w, err := databricks.NewWorkspaceClient()
|
w, err := databricks.NewWorkspaceClient()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -67,8 +64,7 @@ func TestBindJobToExistingJob(t *testing.T) {
|
||||||
err = os.RemoveAll(filepath.Join(bundleRoot, ".databricks"))
|
err = os.RemoveAll(filepath.Join(bundleRoot, ".databricks"))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
err = destroyBundle(t, ctx, bundleRoot)
|
destroyBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Check that job is unbound and exists after bundle is destroyed
|
// Check that job is unbound and exists after bundle is destroyed
|
||||||
job, err = w.Jobs.Get(ctx, jobs.GetJobRequest{
|
job, err = w.Jobs.Get(ctx, jobs.GetJobRequest{
|
||||||
|
@ -85,18 +81,16 @@ func TestAbortBind(t *testing.T) {
|
||||||
|
|
||||||
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
||||||
uniqueId := uuid.New().String()
|
uniqueId := uuid.New().String()
|
||||||
bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{
|
bundleRoot := initTestTemplate(t, ctx, "basic", map[string]any{
|
||||||
"unique_id": uniqueId,
|
"unique_id": uniqueId,
|
||||||
"spark_version": "13.3.x-scala2.12",
|
"spark_version": "13.3.x-scala2.12",
|
||||||
"node_type_id": nodeTypeId,
|
"node_type_id": nodeTypeId,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
jobId := gt.createTestJob(ctx)
|
jobId := gt.createTestJob(ctx)
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
gt.destroyJob(ctx, jobId)
|
gt.destroyJob(ctx, jobId)
|
||||||
err := destroyBundle(t, ctx, bundleRoot)
|
destroyBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
})
|
||||||
|
|
||||||
// Bind should fail because prompting is not possible.
|
// Bind should fail because prompting is not possible.
|
||||||
|
@ -105,12 +99,11 @@ func TestAbortBind(t *testing.T) {
|
||||||
c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId))
|
c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId))
|
||||||
|
|
||||||
// Expect error suggesting to use --auto-approve
|
// Expect error suggesting to use --auto-approve
|
||||||
_, _, err = c.Run()
|
_, _, err := c.Run()
|
||||||
assert.ErrorContains(t, err, "failed to bind the resource")
|
assert.ErrorContains(t, err, "failed to bind the resource")
|
||||||
assert.ErrorContains(t, err, "This bind operation requires user confirmation, but the current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed")
|
assert.ErrorContains(t, err, "This bind operation requires user confirmation, but the current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed")
|
||||||
|
|
||||||
err = deployBundle(t, ctx, bundleRoot)
|
deployBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
w, err := databricks.NewWorkspaceClient()
|
w, err := databricks.NewWorkspaceClient()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -130,10 +123,9 @@ func TestGenerateAndBind(t *testing.T) {
|
||||||
gt := &generateJobTest{T: wt, w: wt.W}
|
gt := &generateJobTest{T: wt, w: wt.W}
|
||||||
|
|
||||||
uniqueId := uuid.New().String()
|
uniqueId := uuid.New().String()
|
||||||
bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{
|
bundleRoot := initTestTemplate(t, ctx, "with_includes", map[string]any{
|
||||||
"unique_id": uniqueId,
|
"unique_id": uniqueId,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
w, err := databricks.NewWorkspaceClient()
|
w, err := databricks.NewWorkspaceClient()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -169,11 +161,9 @@ func TestGenerateAndBind(t *testing.T) {
|
||||||
_, _, err = c.Run()
|
_, _, err = c.Run()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
err = deployBundle(t, ctx, bundleRoot)
|
deployBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = destroyBundle(t, ctx, bundleRoot)
|
destroyBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Check that job is bound and does not exist after bundle is destroyed
|
// Check that job is bound and does not exist after bundle is destroyed
|
||||||
_, err = w.Jobs.Get(ctx, jobs.GetJobRequest{
|
_, err = w.Jobs.Get(ctx, jobs.GetJobRequest{
|
||||||
|
|
|
@ -20,16 +20,14 @@ func TestDeployBundleWithCluster(t *testing.T) {
|
||||||
|
|
||||||
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
||||||
uniqueId := uuid.New().String()
|
uniqueId := uuid.New().String()
|
||||||
root, err := initTestTemplate(t, ctx, "clusters", map[string]any{
|
root := initTestTemplate(t, ctx, "clusters", map[string]any{
|
||||||
"unique_id": uniqueId,
|
"unique_id": uniqueId,
|
||||||
"node_type_id": nodeTypeId,
|
"node_type_id": nodeTypeId,
|
||||||
"spark_version": defaultSparkVersion,
|
"spark_version": defaultSparkVersion,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
err = destroyBundle(t, ctx, root)
|
destroyBundle(t, ctx, root)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
cluster, err := wt.W.Clusters.GetByClusterName(ctx, fmt.Sprintf("test-cluster-%s", uniqueId))
|
cluster, err := wt.W.Clusters.GetByClusterName(ctx, fmt.Sprintf("test-cluster-%s", uniqueId))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -39,8 +37,7 @@ func TestDeployBundleWithCluster(t *testing.T) {
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
err = deployBundle(t, ctx, root)
|
deployBundle(t, ctx, root)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Cluster should exist after bundle deployment
|
// Cluster should exist after bundle deployment
|
||||||
cluster, err := wt.W.Clusters.GetByClusterName(ctx, fmt.Sprintf("test-cluster-%s", uniqueId))
|
cluster, err := wt.W.Clusters.GetByClusterName(ctx, fmt.Sprintf("test-cluster-%s", uniqueId))
|
||||||
|
|
|
@ -18,19 +18,16 @@ func TestDashboards(t *testing.T) {
|
||||||
|
|
||||||
warehouseID := testutil.GetEnvOrSkipTest(t, "TEST_DEFAULT_WAREHOUSE_ID")
|
warehouseID := testutil.GetEnvOrSkipTest(t, "TEST_DEFAULT_WAREHOUSE_ID")
|
||||||
uniqueID := uuid.New().String()
|
uniqueID := uuid.New().String()
|
||||||
root, err := initTestTemplate(t, ctx, "dashboards", map[string]any{
|
root := initTestTemplate(t, ctx, "dashboards", map[string]any{
|
||||||
"unique_id": uniqueID,
|
"unique_id": uniqueID,
|
||||||
"warehouse_id": warehouseID,
|
"warehouse_id": warehouseID,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
err = destroyBundle(t, ctx, root)
|
destroyBundle(t, ctx, root)
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
})
|
||||||
|
|
||||||
err = deployBundle(t, ctx, root)
|
deployBundle(t, ctx, root)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Load bundle configuration by running the validate command.
|
// Load bundle configuration by running the validate command.
|
||||||
b := unmarshalConfig(t, mustValidateBundle(t, ctx, root))
|
b := unmarshalConfig(t, mustValidateBundle(t, ctx, root))
|
||||||
|
@ -55,12 +52,11 @@ func TestDashboards(t *testing.T) {
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Try to redeploy the bundle and confirm that the out of band modification is detected.
|
// Try to redeploy the bundle and confirm that the out of band modification is detected.
|
||||||
stdout, _, err := deployBundleWithArgs(t, ctx, root)
|
stdout, _, err := deployBundleWithArgsErr(t, ctx, root)
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
assert.Contains(t, stdout, `Error: dashboard "file_reference" has been modified remotely`+"\n")
|
assert.Contains(t, stdout, `Error: dashboard "file_reference" has been modified remotely`+"\n")
|
||||||
|
|
||||||
// Redeploy the bundle with the --force flag and confirm that the out of band modification is ignored.
|
// Redeploy the bundle with the --force flag and confirm that the out of band modification is ignored.
|
||||||
_, stderr, err := deployBundleWithArgs(t, ctx, root, "--force")
|
_, stderr := deployBundleWithArgs(t, ctx, root, "--force")
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Contains(t, stderr, `Deployment complete!`+"\n")
|
assert.Contains(t, stderr, `Deployment complete!`+"\n")
|
||||||
}
|
}
|
||||||
|
|
|
@ -25,17 +25,14 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
func setupUcSchemaBundle(t *testing.T, ctx context.Context, w *databricks.WorkspaceClient, uniqueId string) string {
|
func setupUcSchemaBundle(t *testing.T, ctx context.Context, w *databricks.WorkspaceClient, uniqueId string) string {
|
||||||
bundleRoot, err := initTestTemplate(t, ctx, "uc_schema", map[string]any{
|
bundleRoot := initTestTemplate(t, ctx, "uc_schema", map[string]any{
|
||||||
"unique_id": uniqueId,
|
"unique_id": uniqueId,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = deployBundle(t, ctx, bundleRoot)
|
deployBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
err := destroyBundle(t, ctx, bundleRoot)
|
destroyBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
})
|
||||||
|
|
||||||
// Assert the schema is created
|
// Assert the schema is created
|
||||||
|
@ -97,8 +94,7 @@ func TestBundleDeployUcSchema(t *testing.T) {
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Redeploy the bundle
|
// Redeploy the bundle
|
||||||
err = deployBundle(t, ctx, bundleRoot)
|
deployBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Assert the schema is deleted
|
// Assert the schema is deleted
|
||||||
_, err = w.Schemas.GetByFullName(ctx, strings.Join([]string{catalogName, schemaName}, "."))
|
_, err = w.Schemas.GetByFullName(ctx, strings.Join([]string{catalogName, schemaName}, "."))
|
||||||
|
@ -135,16 +131,14 @@ func TestBundlePipelineDeleteWithoutAutoApprove(t *testing.T) {
|
||||||
|
|
||||||
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
||||||
uniqueId := uuid.New().String()
|
uniqueId := uuid.New().String()
|
||||||
bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{
|
bundleRoot := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{
|
||||||
"unique_id": uniqueId,
|
"unique_id": uniqueId,
|
||||||
"node_type_id": nodeTypeId,
|
"node_type_id": nodeTypeId,
|
||||||
"spark_version": defaultSparkVersion,
|
"spark_version": defaultSparkVersion,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// deploy pipeline
|
// deploy pipeline
|
||||||
err = deployBundle(t, ctx, bundleRoot)
|
deployBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// assert pipeline is created
|
// assert pipeline is created
|
||||||
pipelineName := "test-bundle-pipeline-" + uniqueId
|
pipelineName := "test-bundle-pipeline-" + uniqueId
|
||||||
|
@ -182,17 +176,14 @@ func TestBundlePipelineRecreateWithoutAutoApprove(t *testing.T) {
|
||||||
w := wt.W
|
w := wt.W
|
||||||
uniqueId := uuid.New().String()
|
uniqueId := uuid.New().String()
|
||||||
|
|
||||||
bundleRoot, err := initTestTemplate(t, ctx, "recreate_pipeline", map[string]any{
|
bundleRoot := initTestTemplate(t, ctx, "recreate_pipeline", map[string]any{
|
||||||
"unique_id": uniqueId,
|
"unique_id": uniqueId,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = deployBundle(t, ctx, bundleRoot)
|
deployBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
err := destroyBundle(t, ctx, bundleRoot)
|
destroyBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
})
|
||||||
|
|
||||||
// Assert the pipeline is created
|
// Assert the pipeline is created
|
||||||
|
@ -221,16 +212,14 @@ func TestDeployBasicBundleLogs(t *testing.T) {
|
||||||
|
|
||||||
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
||||||
uniqueId := uuid.New().String()
|
uniqueId := uuid.New().String()
|
||||||
root, err := initTestTemplate(t, ctx, "basic", map[string]any{
|
root := initTestTemplate(t, ctx, "basic", map[string]any{
|
||||||
"unique_id": uniqueId,
|
"unique_id": uniqueId,
|
||||||
"node_type_id": nodeTypeId,
|
"node_type_id": nodeTypeId,
|
||||||
"spark_version": defaultSparkVersion,
|
"spark_version": defaultSparkVersion,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
err = destroyBundle(t, ctx, root)
|
destroyBundle(t, ctx, root)
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
})
|
||||||
|
|
||||||
currentUser, err := wt.W.CurrentUser.Me(ctx)
|
currentUser, err := wt.W.CurrentUser.Me(ctx)
|
||||||
|
@ -251,17 +240,14 @@ func TestDeployUcVolume(t *testing.T) {
|
||||||
w := wt.W
|
w := wt.W
|
||||||
|
|
||||||
uniqueId := uuid.New().String()
|
uniqueId := uuid.New().String()
|
||||||
bundleRoot, err := initTestTemplate(t, ctx, "volume", map[string]any{
|
bundleRoot := initTestTemplate(t, ctx, "volume", map[string]any{
|
||||||
"unique_id": uniqueId,
|
"unique_id": uniqueId,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = deployBundle(t, ctx, bundleRoot)
|
deployBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
err := destroyBundle(t, ctx, bundleRoot)
|
destroyBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
})
|
||||||
|
|
||||||
// Assert the volume is created successfully
|
// Assert the volume is created successfully
|
||||||
|
|
|
@ -18,16 +18,14 @@ func TestBundleDeployThenRemoveResources(t *testing.T) {
|
||||||
|
|
||||||
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
||||||
uniqueId := uuid.New().String()
|
uniqueId := uuid.New().String()
|
||||||
bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{
|
bundleRoot := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{
|
||||||
"unique_id": uniqueId,
|
"unique_id": uniqueId,
|
||||||
"node_type_id": nodeTypeId,
|
"node_type_id": nodeTypeId,
|
||||||
"spark_version": defaultSparkVersion,
|
"spark_version": defaultSparkVersion,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// deploy pipeline
|
// deploy pipeline
|
||||||
err = deployBundle(t, ctx, bundleRoot)
|
deployBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// assert pipeline is created
|
// assert pipeline is created
|
||||||
pipelineName := "test-bundle-pipeline-" + uniqueId
|
pipelineName := "test-bundle-pipeline-" + uniqueId
|
||||||
|
@ -46,8 +44,7 @@ func TestBundleDeployThenRemoveResources(t *testing.T) {
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// deploy again
|
// deploy again
|
||||||
err = deployBundle(t, ctx, bundleRoot)
|
deployBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// assert pipeline is deleted
|
// assert pipeline is deleted
|
||||||
_, err = w.Pipelines.GetByName(ctx, pipelineName)
|
_, err = w.Pipelines.GetByName(ctx, pipelineName)
|
||||||
|
@ -58,7 +55,6 @@ func TestBundleDeployThenRemoveResources(t *testing.T) {
|
||||||
assert.ErrorContains(t, err, "does not exist")
|
assert.ErrorContains(t, err, "does not exist")
|
||||||
|
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
err = destroyBundle(t, ctx, bundleRoot)
|
destroyBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
|
@ -19,19 +19,16 @@ func TestDeployBasicToSharedWorkspacePath(t *testing.T) {
|
||||||
currentUser, err := wt.W.CurrentUser.Me(ctx)
|
currentUser, err := wt.W.CurrentUser.Me(ctx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{
|
bundleRoot := initTestTemplate(t, ctx, "basic", map[string]any{
|
||||||
"unique_id": uniqueId,
|
"unique_id": uniqueId,
|
||||||
"node_type_id": nodeTypeId,
|
"node_type_id": nodeTypeId,
|
||||||
"spark_version": defaultSparkVersion,
|
"spark_version": defaultSparkVersion,
|
||||||
"root_path": fmt.Sprintf("/Shared/%s", currentUser.UserName),
|
"root_path": fmt.Sprintf("/Shared/%s", currentUser.UserName),
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
err = destroyBundle(wt, ctx, bundleRoot)
|
destroyBundle(wt, ctx, bundleRoot)
|
||||||
require.NoError(wt, err)
|
|
||||||
})
|
})
|
||||||
|
|
||||||
err = deployBundle(wt, ctx, bundleRoot)
|
deployBundle(wt, ctx, bundleRoot)
|
||||||
require.NoError(wt, err)
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -20,17 +20,16 @@ func TestFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) {
|
||||||
|
|
||||||
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
||||||
uniqueId := uuid.New().String()
|
uniqueId := uuid.New().String()
|
||||||
bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{
|
bundleRoot := initTestTemplate(t, ctx, "basic", map[string]any{
|
||||||
"unique_id": uniqueId,
|
"unique_id": uniqueId,
|
||||||
"spark_version": "13.3.x-scala2.12",
|
"spark_version": "13.3.x-scala2.12",
|
||||||
"node_type_id": nodeTypeId,
|
"node_type_id": nodeTypeId,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot)
|
ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot)
|
||||||
|
|
||||||
// Add some test file to the bundle
|
// Add some test file to the bundle
|
||||||
err = os.WriteFile(filepath.Join(bundleRoot, "test.py"), []byte("print('Hello, World!')"), 0o644)
|
err := os.WriteFile(filepath.Join(bundleRoot, "test.py"), []byte("print('Hello, World!')"), 0o644)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), []byte("print('Hello, World!')"), 0o644)
|
err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), []byte("print('Hello, World!')"), 0o644)
|
||||||
|
@ -40,11 +39,10 @@ func TestFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) {
|
||||||
err = os.WriteFile(filepath.Join(bundleRoot, "notebook.py"), []byte("# Databricks notebook source\nHello, World!"), 0o644)
|
err = os.WriteFile(filepath.Join(bundleRoot, "notebook.py"), []byte("# Databricks notebook source\nHello, World!"), 0o644)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
err = deployBundle(t, ctx, bundleRoot)
|
deployBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
require.NoError(t, destroyBundle(t, ctx, bundleRoot))
|
destroyBundle(t, ctx, bundleRoot)
|
||||||
})
|
})
|
||||||
|
|
||||||
remoteRoot := getBundleRemoteRootPath(w, t, uniqueId)
|
remoteRoot := getBundleRemoteRootPath(w, t, uniqueId)
|
||||||
|
@ -80,8 +78,7 @@ func TestFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) {
|
||||||
err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), []byte("print('Modified!')"), 0o644)
|
err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), []byte("print('Modified!')"), 0o644)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
err = deployBundle(t, ctx, bundleRoot)
|
deployBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Check that removed file is not in workspace anymore
|
// Check that removed file is not in workspace anymore
|
||||||
_, err = w.Workspace.GetStatusByPath(ctx, path.Join(remoteRoot, "files", "test.py"))
|
_, err = w.Workspace.GetStatusByPath(ctx, path.Join(remoteRoot, "files", "test.py"))
|
||||||
|
|
|
@ -20,22 +20,20 @@ func TestBundleDestroy(t *testing.T) {
|
||||||
|
|
||||||
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
||||||
uniqueId := uuid.New().String()
|
uniqueId := uuid.New().String()
|
||||||
bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{
|
bundleRoot := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{
|
||||||
"unique_id": uniqueId,
|
"unique_id": uniqueId,
|
||||||
"node_type_id": nodeTypeId,
|
"node_type_id": nodeTypeId,
|
||||||
"spark_version": defaultSparkVersion,
|
"spark_version": defaultSparkVersion,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
snapshotsDir := filepath.Join(bundleRoot, ".databricks", "bundle", "default", "sync-snapshots")
|
snapshotsDir := filepath.Join(bundleRoot, ".databricks", "bundle", "default", "sync-snapshots")
|
||||||
|
|
||||||
// Assert the snapshot file does not exist
|
// Assert the snapshot file does not exist
|
||||||
_, err = os.ReadDir(snapshotsDir)
|
_, err := os.ReadDir(snapshotsDir)
|
||||||
assert.ErrorIs(t, err, os.ErrNotExist)
|
assert.ErrorIs(t, err, os.ErrNotExist)
|
||||||
|
|
||||||
// deploy resources
|
// deploy resources
|
||||||
err = deployBundle(t, ctx, bundleRoot)
|
deployBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Assert the snapshot file exists
|
// Assert the snapshot file exists
|
||||||
entries, err := os.ReadDir(snapshotsDir)
|
entries, err := os.ReadDir(snapshotsDir)
|
||||||
|
@ -60,8 +58,7 @@ func TestBundleDestroy(t *testing.T) {
|
||||||
assert.Equal(t, job.Settings.Name, jobName)
|
assert.Equal(t, job.Settings.Name, jobName)
|
||||||
|
|
||||||
// destroy bundle
|
// destroy bundle
|
||||||
err = destroyBundle(t, ctx, bundleRoot)
|
destroyBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// assert pipeline is deleted
|
// assert pipeline is deleted
|
||||||
_, err = w.Pipelines.GetByName(ctx, pipelineName)
|
_, err = w.Pipelines.GetByName(ctx, pipelineName)
|
||||||
|
|
|
@ -26,11 +26,9 @@ func TestEmptyBundleDeploy(t *testing.T) {
|
||||||
f.Close()
|
f.Close()
|
||||||
|
|
||||||
// deploy empty bundle
|
// deploy empty bundle
|
||||||
err = deployBundle(t, ctx, tmpDir)
|
deployBundle(t, ctx, tmpDir)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
err = destroyBundle(t, ctx, tmpDir)
|
destroyBundle(t, ctx, tmpDir)
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
|
@ -13,17 +13,14 @@ func TestPythonWheelTaskWithEnvironmentsDeployAndRun(t *testing.T) {
|
||||||
|
|
||||||
ctx, _ := acc.WorkspaceTest(t)
|
ctx, _ := acc.WorkspaceTest(t)
|
||||||
|
|
||||||
bundleRoot, err := initTestTemplate(t, ctx, "python_wheel_task_with_environments", map[string]any{
|
bundleRoot := initTestTemplate(t, ctx, "python_wheel_task_with_environments", map[string]any{
|
||||||
"unique_id": uuid.New().String(),
|
"unique_id": uuid.New().String(),
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = deployBundle(t, ctx, bundleRoot)
|
deployBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
err := destroyBundle(t, ctx, bundleRoot)
|
destroyBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
})
|
||||||
|
|
||||||
out, err := runResource(t, ctx, bundleRoot, "some_other_job")
|
out, err := runResource(t, ctx, bundleRoot, "some_other_job")
|
||||||
|
|
|
@ -26,10 +26,9 @@ func TestGenerateFromExistingJobAndDeploy(t *testing.T) {
|
||||||
gt := &generateJobTest{T: wt, w: wt.W}
|
gt := &generateJobTest{T: wt, w: wt.W}
|
||||||
|
|
||||||
uniqueId := uuid.New().String()
|
uniqueId := uuid.New().String()
|
||||||
bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{
|
bundleRoot := initTestTemplate(t, ctx, "with_includes", map[string]any{
|
||||||
"unique_id": uniqueId,
|
"unique_id": uniqueId,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
jobId := gt.createTestJob(ctx)
|
jobId := gt.createTestJob(ctx)
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
|
@ -41,7 +40,7 @@ func TestGenerateFromExistingJobAndDeploy(t *testing.T) {
|
||||||
"--existing-job-id", fmt.Sprint(jobId),
|
"--existing-job-id", fmt.Sprint(jobId),
|
||||||
"--config-dir", filepath.Join(bundleRoot, "resources"),
|
"--config-dir", filepath.Join(bundleRoot, "resources"),
|
||||||
"--source-dir", filepath.Join(bundleRoot, "src"))
|
"--source-dir", filepath.Join(bundleRoot, "src"))
|
||||||
_, _, err = c.Run()
|
_, _, err := c.Run()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
_, err = os.Stat(filepath.Join(bundleRoot, "src", "test.py"))
|
_, err = os.Stat(filepath.Join(bundleRoot, "src", "test.py"))
|
||||||
|
@ -62,11 +61,9 @@ func TestGenerateFromExistingJobAndDeploy(t *testing.T) {
|
||||||
require.Contains(t, generatedYaml, "spark_version: 13.3.x-scala2.12")
|
require.Contains(t, generatedYaml, "spark_version: 13.3.x-scala2.12")
|
||||||
require.Contains(t, generatedYaml, "num_workers: 1")
|
require.Contains(t, generatedYaml, "num_workers: 1")
|
||||||
|
|
||||||
err = deployBundle(t, ctx, bundleRoot)
|
deployBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = destroyBundle(t, ctx, bundleRoot)
|
destroyBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type generateJobTest struct {
|
type generateJobTest struct {
|
||||||
|
|
|
@ -25,10 +25,9 @@ func TestGenerateFromExistingPipelineAndDeploy(t *testing.T) {
|
||||||
gt := &generatePipelineTest{T: wt, w: wt.W}
|
gt := &generatePipelineTest{T: wt, w: wt.W}
|
||||||
|
|
||||||
uniqueId := uuid.New().String()
|
uniqueId := uuid.New().String()
|
||||||
bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{
|
bundleRoot := initTestTemplate(t, ctx, "with_includes", map[string]any{
|
||||||
"unique_id": uniqueId,
|
"unique_id": uniqueId,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
pipelineId, name := gt.createTestPipeline(ctx)
|
pipelineId, name := gt.createTestPipeline(ctx)
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
|
@ -40,7 +39,7 @@ func TestGenerateFromExistingPipelineAndDeploy(t *testing.T) {
|
||||||
"--existing-pipeline-id", fmt.Sprint(pipelineId),
|
"--existing-pipeline-id", fmt.Sprint(pipelineId),
|
||||||
"--config-dir", filepath.Join(bundleRoot, "resources"),
|
"--config-dir", filepath.Join(bundleRoot, "resources"),
|
||||||
"--source-dir", filepath.Join(bundleRoot, "src"))
|
"--source-dir", filepath.Join(bundleRoot, "src"))
|
||||||
_, _, err = c.Run()
|
_, _, err := c.Run()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
_, err = os.Stat(filepath.Join(bundleRoot, "src", "notebook.py"))
|
_, err = os.Stat(filepath.Join(bundleRoot, "src", "notebook.py"))
|
||||||
|
@ -70,11 +69,9 @@ func TestGenerateFromExistingPipelineAndDeploy(t *testing.T) {
|
||||||
require.Contains(t, generatedYaml, "- file:")
|
require.Contains(t, generatedYaml, "- file:")
|
||||||
require.Contains(t, generatedYaml, fmt.Sprintf("path: %s", filepath.Join("..", "src", "test.py")))
|
require.Contains(t, generatedYaml, fmt.Sprintf("path: %s", filepath.Join("..", "src", "test.py")))
|
||||||
|
|
||||||
err = deployBundle(t, ctx, bundleRoot)
|
deployBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = destroyBundle(t, ctx, bundleRoot)
|
destroyBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type generatePipelineTest struct {
|
type generatePipelineTest struct {
|
||||||
|
|
|
@ -26,18 +26,15 @@ import (
|
||||||
|
|
||||||
const defaultSparkVersion = "13.3.x-snapshot-scala2.12"
|
const defaultSparkVersion = "13.3.x-snapshot-scala2.12"
|
||||||
|
|
||||||
func initTestTemplate(t testutil.TestingT, ctx context.Context, templateName string, config map[string]any) (string, error) {
|
func initTestTemplate(t testutil.TestingT, ctx context.Context, templateName string, config map[string]any) string {
|
||||||
bundleRoot := t.TempDir()
|
bundleRoot := t.TempDir()
|
||||||
return initTestTemplateWithBundleRoot(t, ctx, templateName, config, bundleRoot)
|
return initTestTemplateWithBundleRoot(t, ctx, templateName, config, bundleRoot)
|
||||||
}
|
}
|
||||||
|
|
||||||
func initTestTemplateWithBundleRoot(t testutil.TestingT, ctx context.Context, templateName string, config map[string]any, bundleRoot string) (string, error) {
|
func initTestTemplateWithBundleRoot(t testutil.TestingT, ctx context.Context, templateName string, config map[string]any, bundleRoot string) string {
|
||||||
templateRoot := filepath.Join("bundles", templateName)
|
templateRoot := filepath.Join("bundles", templateName)
|
||||||
|
|
||||||
configFilePath, err := writeConfigFile(t, config)
|
configFilePath := writeConfigFile(t, config)
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx = root.SetWorkspaceClient(ctx, nil)
|
ctx = root.SetWorkspaceClient(ctx, nil)
|
||||||
cmd := cmdio.NewIO(ctx, flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "bundles")
|
cmd := cmdio.NewIO(ctx, flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "bundles")
|
||||||
|
@ -46,21 +43,21 @@ func initTestTemplateWithBundleRoot(t testutil.TestingT, ctx context.Context, te
|
||||||
out, err := filer.NewLocalClient(bundleRoot)
|
out, err := filer.NewLocalClient(bundleRoot)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = template.Materialize(ctx, configFilePath, os.DirFS(templateRoot), out)
|
err = template.Materialize(ctx, configFilePath, os.DirFS(templateRoot), out)
|
||||||
return bundleRoot, err
|
require.NoError(t, err)
|
||||||
|
return bundleRoot
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeConfigFile(t testutil.TestingT, config map[string]any) (string, error) {
|
func writeConfigFile(t testutil.TestingT, config map[string]any) string {
|
||||||
bytes, err := json.Marshal(config)
|
bytes, err := json.Marshal(config)
|
||||||
if err != nil {
|
require.NoError(t, err)
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
filepath := filepath.Join(dir, "config.json")
|
filepath := filepath.Join(dir, "config.json")
|
||||||
t.Log("Configuration for template: ", string(bytes))
|
t.Log("Configuration for template: ", string(bytes))
|
||||||
|
|
||||||
err = os.WriteFile(filepath, bytes, 0o644)
|
err = os.WriteFile(filepath, bytes, 0o644)
|
||||||
return filepath, err
|
require.NoError(t, err)
|
||||||
|
return filepath
|
||||||
}
|
}
|
||||||
|
|
||||||
func validateBundle(t testutil.TestingT, ctx context.Context, path string) ([]byte, error) {
|
func validateBundle(t testutil.TestingT, ctx context.Context, path string) ([]byte, error) {
|
||||||
|
@ -83,14 +80,14 @@ func unmarshalConfig(t testutil.TestingT, data []byte) *bundle.Bundle {
|
||||||
return bundle
|
return bundle
|
||||||
}
|
}
|
||||||
|
|
||||||
func deployBundle(t testutil.TestingT, ctx context.Context, path string) error {
|
func deployBundle(t testutil.TestingT, ctx context.Context, path string) {
|
||||||
ctx = env.Set(ctx, "BUNDLE_ROOT", path)
|
ctx = env.Set(ctx, "BUNDLE_ROOT", path)
|
||||||
c := testcli.NewRunner(t, ctx, "bundle", "deploy", "--force-lock", "--auto-approve")
|
c := testcli.NewRunner(t, ctx, "bundle", "deploy", "--force-lock", "--auto-approve")
|
||||||
_, _, err := c.Run()
|
_, _, err := c.Run()
|
||||||
return err
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func deployBundleWithArgs(t testutil.TestingT, ctx context.Context, path string, args ...string) (string, string, error) {
|
func deployBundleWithArgsErr(t testutil.TestingT, ctx context.Context, path string, args ...string) (string, string, error) {
|
||||||
ctx = env.Set(ctx, "BUNDLE_ROOT", path)
|
ctx = env.Set(ctx, "BUNDLE_ROOT", path)
|
||||||
args = append([]string{"bundle", "deploy"}, args...)
|
args = append([]string{"bundle", "deploy"}, args...)
|
||||||
c := testcli.NewRunner(t, ctx, args...)
|
c := testcli.NewRunner(t, ctx, args...)
|
||||||
|
@ -98,13 +95,19 @@ func deployBundleWithArgs(t testutil.TestingT, ctx context.Context, path string,
|
||||||
return stdout.String(), stderr.String(), err
|
return stdout.String(), stderr.String(), err
|
||||||
}
|
}
|
||||||
|
|
||||||
func deployBundleWithFlags(t testutil.TestingT, ctx context.Context, path string, flags []string) error {
|
func deployBundleWithArgs(t testutil.TestingT, ctx context.Context, path string, args ...string) (string, string) {
|
||||||
|
stdout, stderr, err := deployBundleWithArgsErr(t, ctx, path, args...)
|
||||||
|
require.NoError(t, err)
|
||||||
|
return stdout, stderr
|
||||||
|
}
|
||||||
|
|
||||||
|
func deployBundleWithFlags(t testutil.TestingT, ctx context.Context, path string, flags []string) {
|
||||||
ctx = env.Set(ctx, "BUNDLE_ROOT", path)
|
ctx = env.Set(ctx, "BUNDLE_ROOT", path)
|
||||||
args := []string{"bundle", "deploy", "--force-lock"}
|
args := []string{"bundle", "deploy", "--force-lock"}
|
||||||
args = append(args, flags...)
|
args = append(args, flags...)
|
||||||
c := testcli.NewRunner(t, ctx, args...)
|
c := testcli.NewRunner(t, ctx, args...)
|
||||||
_, _, err := c.Run()
|
_, _, err := c.Run()
|
||||||
return err
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func runResource(t testutil.TestingT, ctx context.Context, path, key string) (string, error) {
|
func runResource(t testutil.TestingT, ctx context.Context, path, key string) (string, error) {
|
||||||
|
@ -128,11 +131,11 @@ func runResourceWithParams(t testutil.TestingT, ctx context.Context, path, key s
|
||||||
return stdout.String(), err
|
return stdout.String(), err
|
||||||
}
|
}
|
||||||
|
|
||||||
func destroyBundle(t testutil.TestingT, ctx context.Context, path string) error {
|
func destroyBundle(t testutil.TestingT, ctx context.Context, path string) {
|
||||||
ctx = env.Set(ctx, "BUNDLE_ROOT", path)
|
ctx = env.Set(ctx, "BUNDLE_ROOT", path)
|
||||||
c := testcli.NewRunner(t, ctx, "bundle", "destroy", "--auto-approve")
|
c := testcli.NewRunner(t, ctx, "bundle", "destroy", "--auto-approve")
|
||||||
_, _, err := c.Run()
|
_, _, err := c.Run()
|
||||||
return err
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func getBundleRemoteRootPath(w *databricks.WorkspaceClient, t testutil.TestingT, uniqueId string) string {
|
func getBundleRemoteRootPath(w *databricks.WorkspaceClient, t testutil.TestingT, uniqueId string) string {
|
||||||
|
|
|
@ -24,21 +24,18 @@ func TestJobsMetadataFile(t *testing.T) {
|
||||||
|
|
||||||
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
||||||
uniqueId := uuid.New().String()
|
uniqueId := uuid.New().String()
|
||||||
bundleRoot, err := initTestTemplate(t, ctx, "job_metadata", map[string]any{
|
bundleRoot := initTestTemplate(t, ctx, "job_metadata", map[string]any{
|
||||||
"unique_id": uniqueId,
|
"unique_id": uniqueId,
|
||||||
"node_type_id": nodeTypeId,
|
"node_type_id": nodeTypeId,
|
||||||
"spark_version": defaultSparkVersion,
|
"spark_version": defaultSparkVersion,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// deploy bundle
|
// deploy bundle
|
||||||
err = deployBundle(t, ctx, bundleRoot)
|
deployBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Cleanup the deployed bundle
|
// Cleanup the deployed bundle
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
err = destroyBundle(t, ctx, bundleRoot)
|
destroyBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
})
|
||||||
|
|
||||||
// assert job 1 is created
|
// assert job 1 is created
|
||||||
|
|
|
@ -27,16 +27,14 @@ func TestLocalStateStaleness(t *testing.T) {
|
||||||
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
||||||
uniqueId := uuid.New().String()
|
uniqueId := uuid.New().String()
|
||||||
initialize := func() string {
|
initialize := func() string {
|
||||||
root, err := initTestTemplate(t, ctx, "basic", map[string]any{
|
root := initTestTemplate(t, ctx, "basic", map[string]any{
|
||||||
"unique_id": uniqueId,
|
"unique_id": uniqueId,
|
||||||
"node_type_id": nodeTypeId,
|
"node_type_id": nodeTypeId,
|
||||||
"spark_version": defaultSparkVersion,
|
"spark_version": defaultSparkVersion,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
err = destroyBundle(t, ctx, root)
|
destroyBundle(t, ctx, root)
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
})
|
||||||
|
|
||||||
return root
|
return root
|
||||||
|
@ -48,16 +46,13 @@ func TestLocalStateStaleness(t *testing.T) {
|
||||||
bundleB := initialize()
|
bundleB := initialize()
|
||||||
|
|
||||||
// 1) Deploy bundle A
|
// 1) Deploy bundle A
|
||||||
err = deployBundle(t, ctx, bundleA)
|
deployBundle(t, ctx, bundleA)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// 2) Deploy bundle B
|
// 2) Deploy bundle B
|
||||||
err = deployBundle(t, ctx, bundleB)
|
deployBundle(t, ctx, bundleB)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// 3) Deploy bundle A again
|
// 3) Deploy bundle A again
|
||||||
err = deployBundle(t, ctx, bundleA)
|
deployBundle(t, ctx, bundleA)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Assert that there is only a single job in the workspace corresponding to this bundle.
|
// Assert that there is only a single job in the workspace corresponding to this bundle.
|
||||||
iter := w.Jobs.List(context.Background(), jobs.ListJobsRequest{
|
iter := w.Jobs.List(context.Background(), jobs.ListJobsRequest{
|
||||||
|
|
|
@ -15,21 +15,18 @@ func runPythonWheelTest(t *testing.T, templateName, sparkVersion string, pythonW
|
||||||
|
|
||||||
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
||||||
instancePoolId := env.Get(ctx, "TEST_INSTANCE_POOL_ID")
|
instancePoolId := env.Get(ctx, "TEST_INSTANCE_POOL_ID")
|
||||||
bundleRoot, err := initTestTemplate(t, ctx, templateName, map[string]any{
|
bundleRoot := initTestTemplate(t, ctx, templateName, map[string]any{
|
||||||
"node_type_id": nodeTypeId,
|
"node_type_id": nodeTypeId,
|
||||||
"unique_id": uuid.New().String(),
|
"unique_id": uuid.New().String(),
|
||||||
"spark_version": sparkVersion,
|
"spark_version": sparkVersion,
|
||||||
"python_wheel_wrapper": pythonWheelWrapper,
|
"python_wheel_wrapper": pythonWheelWrapper,
|
||||||
"instance_pool_id": instancePoolId,
|
"instance_pool_id": instancePoolId,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = deployBundle(t, ctx, bundleRoot)
|
deployBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
err := destroyBundle(t, ctx, bundleRoot)
|
destroyBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
})
|
||||||
|
|
||||||
out, err := runResource(t, ctx, bundleRoot, "some_other_job")
|
out, err := runResource(t, ctx, bundleRoot, "some_other_job")
|
||||||
|
|
|
@ -15,7 +15,7 @@ func runSparkJarTestCommon(t *testing.T, ctx context.Context, sparkVersion, arti
|
||||||
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
nodeTypeId := testutil.GetCloud(t).NodeTypeID()
|
||||||
tmpDir := t.TempDir()
|
tmpDir := t.TempDir()
|
||||||
instancePoolId := env.Get(ctx, "TEST_INSTANCE_POOL_ID")
|
instancePoolId := env.Get(ctx, "TEST_INSTANCE_POOL_ID")
|
||||||
bundleRoot, err := initTestTemplateWithBundleRoot(t, ctx, "spark_jar_task", map[string]any{
|
bundleRoot := initTestTemplateWithBundleRoot(t, ctx, "spark_jar_task", map[string]any{
|
||||||
"node_type_id": nodeTypeId,
|
"node_type_id": nodeTypeId,
|
||||||
"unique_id": uuid.New().String(),
|
"unique_id": uuid.New().String(),
|
||||||
"spark_version": sparkVersion,
|
"spark_version": sparkVersion,
|
||||||
|
@ -23,14 +23,11 @@ func runSparkJarTestCommon(t *testing.T, ctx context.Context, sparkVersion, arti
|
||||||
"artifact_path": artifactPath,
|
"artifact_path": artifactPath,
|
||||||
"instance_pool_id": instancePoolId,
|
"instance_pool_id": instancePoolId,
|
||||||
}, tmpDir)
|
}, tmpDir)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = deployBundle(t, ctx, bundleRoot)
|
deployBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
err := destroyBundle(t, ctx, bundleRoot)
|
destroyBundle(t, ctx, bundleRoot)
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
})
|
||||||
|
|
||||||
out, err := runResource(t, ctx, bundleRoot, "jar_job")
|
out, err := runResource(t, ctx, bundleRoot, "jar_job")
|
||||||
|
|
Loading…
Reference in New Issue