diff --git a/.github/workflows/close-stale-issues.yml b/.github/workflows/close-stale-issues.yml index ffe550132..273b89a9c 100644 --- a/.github/workflows/close-stale-issues.yml +++ b/.github/workflows/close-stale-issues.yml @@ -7,12 +7,16 @@ on: jobs: cleanup: + name: Stale issue job + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco + permissions: issues: write contents: read pull-requests: write - runs-on: ubuntu-latest - name: Stale issue job + steps: - uses: actions/stale@v9 with: diff --git a/.github/workflows/external-message.yml b/.github/workflows/external-message.yml index 28e61503c..f06d81a47 100644 --- a/.github/workflows/external-message.yml +++ b/.github/workflows/external-message.yml @@ -13,7 +13,10 @@ on: jobs: comment-on-pr: - runs-on: ubuntu-latest + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco + permissions: pull-requests: write diff --git a/.github/workflows/integration-approve.yml b/.github/workflows/integration-approve.yml index 4bdeb62a3..293d31a2a 100644 --- a/.github/workflows/integration-approve.yml +++ b/.github/workflows/integration-approve.yml @@ -17,7 +17,9 @@ jobs: # * Avoid running integration tests twice, since it was already run at the tip of the branch before squashing. # trigger: - runs-on: ubuntu-latest + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco steps: - name: Auto-approve squashed commit diff --git a/.github/workflows/integration-main.yml b/.github/workflows/integration-main.yml index 064e439cf..0b6032d50 100644 --- a/.github/workflows/integration-main.yml +++ b/.github/workflows/integration-main.yml @@ -11,7 +11,10 @@ jobs: # This workflow triggers the integration test workflow in a different repository. # It requires secrets from the "test-trigger-is" environment, which are only available to authorized users. trigger: - runs-on: ubuntu-latest + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco + environment: "test-trigger-is" steps: diff --git a/.github/workflows/integration-pr.yml b/.github/workflows/integration-pr.yml index c1e3a9a29..0f9c4797a 100644 --- a/.github/workflows/integration-pr.yml +++ b/.github/workflows/integration-pr.yml @@ -10,7 +10,10 @@ jobs: # This workflow triggers the integration test workflow in a different repository. # It requires secrets from the "test-trigger-is" environment, which are only available to authorized users. trigger: - runs-on: ubuntu-latest + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco + environment: "test-trigger-is" # Only run this job for PRs from branches on the main repository and not from forks. diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index a4a35420a..b71b23c4b 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -13,12 +13,26 @@ on: # seed the build cache. 
branches: - main + schedule: + - cron: '0 0,12 * * *' # Runs at 00:00 and 12:00 UTC daily env: GOTESTSUM_FORMAT: github-actions jobs: + cleanups: + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco + steps: + - name: Clean up cache if running on schedule + if: ${{ github.event_name == 'schedule' }} + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: gh cache delete --all --repo databricks/cli || true + tests: + needs: cleanups runs-on: ${{ matrix.os }} strategy: @@ -61,6 +75,7 @@ jobs: run: make test golangci: + needs: cleanups name: lint runs-on: ubuntu-latest steps: @@ -68,6 +83,10 @@ jobs: - uses: actions/setup-go@v5 with: go-version: 1.23.4 + # Use different schema from regular job, to avoid overwriting the same key + cache-dependency-path: | + go.sum + .golangci.yaml - name: Run go mod tidy run: | go mod tidy @@ -82,6 +101,7 @@ jobs: args: --timeout=15m validate-bundle-schema: + needs: cleanups runs-on: ubuntu-latest steps: @@ -92,6 +112,10 @@ jobs: uses: actions/setup-go@v5 with: go-version: 1.23.4 + # Use different schema from regular job, to avoid overwriting the same key + cache-dependency-path: | + go.sum + bundle/internal/schema/*.* - name: Verify that the schema is up to date run: | diff --git a/.github/workflows/release-snapshot.yml b/.github/workflows/release-snapshot.yml index 7ef8b43c9..5c56a294e 100644 --- a/.github/workflows/release-snapshot.yml +++ b/.github/workflows/release-snapshot.yml @@ -20,7 +20,10 @@ on: jobs: goreleaser: - runs-on: ubuntu-latest + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco + steps: - name: Checkout repository and submodules uses: actions/checkout@v4 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index e4a253531..061688506 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -9,9 +9,13 @@ on: jobs: goreleaser: + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco + outputs: artifacts: ${{ steps.releaser.outputs.artifacts }} - runs-on: ubuntu-latest + steps: - name: Checkout repository and submodules uses: actions/checkout@v4 @@ -54,8 +58,12 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} create-setup-cli-release-pr: + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco + needs: goreleaser - runs-on: ubuntu-latest + steps: - name: Set VERSION variable from tag run: | @@ -78,8 +86,12 @@ jobs: }); create-homebrew-tap-release-pr: + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco + needs: goreleaser - runs-on: ubuntu-latest + steps: - name: Set VERSION variable from tag run: | @@ -115,8 +127,12 @@ jobs: }); create-vscode-extension-update-pr: + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco + needs: goreleaser - runs-on: ubuntu-latest + steps: - name: Set VERSION variable from tag run: | diff --git a/.golangci.yaml b/.golangci.yaml index 06d8152e5..07a6afdc5 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -14,6 +14,7 @@ linters: - testifylint - intrange - mirror + - perfsprint linters-settings: govet: enable-all: true diff --git a/CHANGELOG.md b/CHANGELOG.md index 6bdb0795b..5b59fa540 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Version changelog +## [Release] Release v0.238.0 + +Bundles: + * Fix finding Python within virtualenv on Windows ([#2034](https://github.com/databricks/cli/pull/2034)). 
+ * Include missing field descriptions in JSON schema ([#2045](https://github.com/databricks/cli/pull/2045)). + * Add validation for volume referenced from `artifact_path` ([#2050](https://github.com/databricks/cli/pull/2050)). + * Handle `${workspace.file_path}` references in source-linked deployments ([#2046](https://github.com/databricks/cli/pull/2046)). + * Set the write bit for files written during template initialization ([#2068](https://github.com/databricks/cli/pull/2068)). + ## [Release] Release v0.237.0 Bundles: diff --git a/Makefile b/Makefile index f8b725900..40eef9f31 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ default: build -PACKAGES=./libs/... ./internal/... ./cmd/... ./bundle/... . +PACKAGES=./acceptance/... ./libs/... ./internal/... ./cmd/... ./bundle/... . GOTESTSUM_FORMAT ?= pkgname-and-test-fails diff --git a/acceptance/README.md b/acceptance/README.md new file mode 100644 index 000000000..42a37d253 --- /dev/null +++ b/acceptance/README.md @@ -0,0 +1,19 @@ +Acceptance tests are blackbox tests that are run against a compiled binary. + +Currently these tests are run against a "fake" HTTP server pretending to be the Databricks API. However, they will be extended to run against a real environment as regular integration tests. + +To author a test: + - Add a new directory under `acceptance`. Any level of nesting is supported. + - Add `databricks.yml` there. + - Add `script` with commands to run, e.g. `$CLI bundle validate`. The test case is recognized by the presence of `script`. + +The test runner will run the script, capture its output, and compare it with the `output.txt` file in the same directory. + +To write `output.txt` for the first time, or to overwrite it with the current output, pass the -update flag to go test. + +The scripts are run with `bash -euo pipefail`, so any errors are propagated. They are captured in `output.txt` by appending an `Exit code: N` line at the end. + +For more complex tests one can also use: +- `errcode` helper: if the command fails with a non-zero code, it appends `Exit code: N` to the output but returns success to the caller (bash), allowing the script to continue. +- `trace` helper: prints the arguments before executing the command. +- custom output files: redirect output to a custom file (its name must start with `out`), e.g. `$CLI bundle validate > out.txt 2> out.error.txt`. 
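For illustration, scaffolding a new test case per the README above could look like this; a minimal sketch, with a hypothetical directory name, and assuming the `-update` flag works as described above:

    mkdir -p acceptance/bundle/my_test
    printf 'bundle:\n  name: my_test\n' > acceptance/bundle/my_test/databricks.yml
    echo 'trace $CLI bundle validate -o json | jq .bundle.name' > acceptance/bundle/my_test/script
    go test ./acceptance -run TestAccept -update   # records output.txt for the first time

Subsequent plain runs of `go test ./acceptance` compare the captured script output against the recorded `output.txt`; the runner in acceptance_test.go below automates the build, environment setup, and comparison.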
diff --git a/acceptance/acceptance_test.go b/acceptance/acceptance_test.go new file mode 100644 index 000000000..759c0aeca --- /dev/null +++ b/acceptance/acceptance_test.go @@ -0,0 +1,302 @@ +package acceptance_test + +import ( + "errors" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "runtime" + "slices" + "sort" + "strings" + "testing" + "time" + + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/libs/testdiff" + "github.com/stretchr/testify/require" +) + +var KeepTmp = os.Getenv("KEEP_TMP") != "" + +const ( + EntryPointScript = "script" + CleanupScript = "script.cleanup" + PrepareScript = "script.prepare" +) + +var Scripts = map[string]bool{ + EntryPointScript: true, + CleanupScript: true, + PrepareScript: true, +} + +func TestAccept(t *testing.T) { + execPath := BuildCLI(t) + // $CLI is what test scripts are using + t.Setenv("CLI", execPath) + + server := StartServer(t) + AddHandlers(server) + // Redirect API access to local server: + t.Setenv("DATABRICKS_HOST", fmt.Sprintf("http://127.0.0.1:%d", server.Port)) + t.Setenv("DATABRICKS_TOKEN", "dapi1234") + + homeDir := t.TempDir() + // Do not read user's ~/.databrickscfg + t.Setenv(env.HomeEnvVar(), homeDir) + + testDirs := getTests(t) + require.NotEmpty(t, testDirs) + for _, dir := range testDirs { + t.Run(dir, func(t *testing.T) { + t.Parallel() + runTest(t, dir) + }) + } +} + +func getTests(t *testing.T) []string { + testDirs := make([]string, 0, 128) + + err := filepath.Walk(".", func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + name := filepath.Base(path) + if name == EntryPointScript { + // Presence of 'script' marks a test case in this directory + testDirs = append(testDirs, filepath.Dir(path)) + } + return nil + }) + require.NoError(t, err) + + sort.Strings(testDirs) + return testDirs +} + +func runTest(t *testing.T, dir string) { + var tmpDir string + var err error + if KeepTmp { + tempDirBase := filepath.Join(os.TempDir(), "acceptance") + _ = os.Mkdir(tempDirBase, 0o755) + tmpDir, err = os.MkdirTemp(tempDirBase, "") + require.NoError(t, err) + t.Logf("Created directory: %s", tmpDir) + } else { + tmpDir = t.TempDir() + } + + scriptContents := readMergedScriptContents(t, dir) + testutil.WriteFile(t, filepath.Join(tmpDir, EntryPointScript), scriptContents) + + inputs := make(map[string]bool, 2) + outputs := make(map[string]bool, 2) + err = CopyDir(dir, tmpDir, inputs, outputs) + require.NoError(t, err) + + args := []string{"bash", "-euo", "pipefail", EntryPointScript} + cmd := exec.Command(args[0], args[1:]...) 
+ cmd.Dir = tmpDir + outB, err := cmd.CombinedOutput() + + out := formatOutput(string(outB), err) + out = strings.ReplaceAll(out, os.Getenv("CLI"), "$CLI") + doComparison(t, filepath.Join(dir, "output.txt"), "script output", out) + + for key := range outputs { + if key == "output.txt" { + // handled above + continue + } + pathNew := filepath.Join(tmpDir, key) + newValBytes, err := os.ReadFile(pathNew) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + t.Errorf("%s: expected to find this file but could not (%s)", key, tmpDir) + } else { + t.Errorf("%s: could not read: %s", key, err) + } + continue + } + pathExpected := filepath.Join(dir, key) + doComparison(t, pathExpected, pathNew, string(newValBytes)) + } + + // Make sure there are no unaccounted-for new files + files, err := os.ReadDir(tmpDir) + require.NoError(t, err) + + for _, f := range files { + name := f.Name() + if _, ok := inputs[name]; ok { + continue + } + if _, ok := outputs[name]; ok { + continue + } + t.Errorf("Unexpected output: %s", f) + if strings.HasPrefix(name, "out") { + // We have a new file starting with "out" + // Show the contents & support overwrite mode for it: + pathNew := filepath.Join(tmpDir, name) + newVal := testutil.ReadFile(t, pathNew) + doComparison(t, filepath.Join(dir, name), filepath.Join(tmpDir, name), newVal) + } + } +} + +func doComparison(t *testing.T, pathExpected, pathNew, valueNew string) { + valueNew = testdiff.NormalizeNewlines(valueNew) + valueExpected := string(readIfExists(t, pathExpected)) + valueExpected = testdiff.NormalizeNewlines(valueExpected) + testdiff.AssertEqualTexts(t, pathExpected, pathNew, valueExpected, valueNew) + if testdiff.OverwriteMode { + if valueNew != "" { + t.Logf("Overwriting: %s", pathExpected) + testutil.WriteFile(t, pathExpected, valueNew) + } else { + t.Logf("Removing: %s", pathExpected) + _ = os.Remove(pathExpected) + } + } +} + +// Returns combined script.prepare (root) + script.prepare (parent) + ... + script + ... + script.cleanup (parent) + ... +// Note: cleanups are not executed if the main script fails; that's not a huge issue, since it runs in a temp dir. +func readMergedScriptContents(t *testing.T, dir string) string { + scriptContents := testutil.ReadFile(t, filepath.Join(dir, EntryPointScript)) + prepares := []string{} + cleanups := []string{} + + for { + x := readIfExists(t, filepath.Join(dir, CleanupScript)) + if len(x) > 0 { + cleanups = append(cleanups, string(x)) + } + + x = readIfExists(t, filepath.Join(dir, PrepareScript)) + if len(x) > 0 { + prepares = append(prepares, string(x)) + } + + if dir == "" || dir == "." { + break + } + + dir = filepath.Dir(dir) + require.True(t, filepath.IsLocal(dir)) + } + + slices.Reverse(prepares) + prepares = append(prepares, scriptContents) + prepares = append(prepares, cleanups...) + return strings.Join(prepares, "\n") +} + +func BuildCLI(t *testing.T) string { + cwd, err := os.Getwd() + require.NoError(t, err) + execPath := filepath.Join(cwd, "build", "databricks") + if runtime.GOOS == "windows" { + execPath += ".exe" + } + + start := time.Now() + args := []string{"go", "build", "-mod", "vendor", "-o", execPath} + cmd := exec.Command(args[0], args[1:]...) + cmd.Dir = ".." 
+ out, err := cmd.CombinedOutput() + elapsed := time.Since(start) + t.Logf("%s took %s", args, elapsed) + require.NoError(t, err, "go build failed: %s: %s\n%s", args, err, out) + if len(out) > 0 { + t.Logf("go build output: %s: %s", args, out) + } + + // Quick check + warm up cache: + cmd = exec.Command(execPath, "--version") + out, err = cmd.CombinedOutput() + require.NoError(t, err, "%s --version failed: %s\n%s", execPath, err, out) + return execPath +} + +func copyFile(src, dst string) error { + in, err := os.Open(src) + if err != nil { + return err + } + defer in.Close() + + out, err := os.Create(dst) + if err != nil { + return err + } + defer out.Close() + + _, err = io.Copy(out, in) + return err +} + +func formatOutput(out string, err error) string { + if err == nil { + return out + } + if exiterr, ok := err.(*exec.ExitError); ok { + exitCode := exiterr.ExitCode() + out += fmt.Sprintf("\nExit code: %d\n", exitCode) + } else { + out += fmt.Sprintf("\nError: %s\n", err) + } + return out +} + +func readIfExists(t *testing.T, path string) []byte { + data, err := os.ReadFile(path) + if err == nil { + return data + } + + if !errors.Is(err, os.ErrNotExist) { + t.Fatalf("%s: %s", path, err) + } + return []byte{} +} + +func CopyDir(src, dst string, inputs, outputs map[string]bool) error { + return filepath.Walk(src, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + name := info.Name() + + relPath, err := filepath.Rel(src, path) + if err != nil { + return err + } + + if strings.HasPrefix(name, "out") { + outputs[relPath] = true + return nil + } else { + inputs[relPath] = true + } + + if _, ok := Scripts[name]; ok { + return nil + } + + destPath := filepath.Join(dst, relPath) + + if info.IsDir() { + return os.MkdirAll(destPath, info.Mode()) + } + + return copyFile(path, destPath) + }) +} diff --git a/acceptance/build/.gitignore b/acceptance/build/.gitignore new file mode 100644 index 000000000..a48b4db25 --- /dev/null +++ b/acceptance/build/.gitignore @@ -0,0 +1 @@ +databricks diff --git a/bundle/tests/clusters/databricks.yml b/acceptance/bundle/override/clusters/databricks.yml similarity index 92% rename from bundle/tests/clusters/databricks.yml rename to acceptance/bundle/override/clusters/databricks.yml index 1074462a6..14efceec0 100644 --- a/bundle/tests/clusters/databricks.yml +++ b/acceptance/bundle/override/clusters/databricks.yml @@ -1,9 +1,6 @@ bundle: name: clusters -workspace: - host: https://acme.cloud.databricks.com/ - resources: clusters: foo: diff --git a/acceptance/bundle/override/clusters/output.txt b/acceptance/bundle/override/clusters/output.txt new file mode 100644 index 000000000..cff30b3af --- /dev/null +++ b/acceptance/bundle/override/clusters/output.txt @@ -0,0 +1,33 @@ + +>>> $CLI bundle validate -o json -t default +{ + "autoscale": { + "max_workers": 7, + "min_workers": 2 + }, + "cluster_name": "foo", + "custom_tags": {}, + "node_type_id": "i3.xlarge", + "num_workers": 2, + "spark_conf": { + "spark.executor.memory": "2g" + }, + "spark_version": "13.3.x-scala2.12" +} + +>>> $CLI bundle validate -o json -t development +{ + "autoscale": { + "max_workers": 3, + "min_workers": 1 + }, + "cluster_name": "foo-override", + "custom_tags": {}, + "node_type_id": "m5.xlarge", + "num_workers": 3, + "spark_conf": { + "spark.executor.memory": "4g", + "spark.executor.memory2": "4g" + }, + "spark_version": "15.2.x-scala2.12" +} diff --git a/acceptance/bundle/override/clusters/script b/acceptance/bundle/override/clusters/script new file mode 
100644 index 000000000..4a73dd93e --- /dev/null +++ b/acceptance/bundle/override/clusters/script @@ -0,0 +1,2 @@ +trace $CLI bundle validate -o json -t default | jq .resources.clusters.foo +trace $CLI bundle validate -o json -t development | jq .resources.clusters.foo diff --git a/bundle/tests/override_job_cluster/databricks.yml b/acceptance/bundle/override/job_cluster/databricks.yml similarity index 91% rename from bundle/tests/override_job_cluster/databricks.yml rename to acceptance/bundle/override/job_cluster/databricks.yml index a85b3b711..d6b7ede4f 100644 --- a/bundle/tests/override_job_cluster/databricks.yml +++ b/acceptance/bundle/override/job_cluster/databricks.yml @@ -1,9 +1,6 @@ bundle: name: override_job_cluster -workspace: - host: https://acme.cloud.databricks.com/ - resources: jobs: foo: diff --git a/acceptance/bundle/override/job_cluster/output.txt b/acceptance/bundle/override/job_cluster/output.txt new file mode 100644 index 000000000..947d19032 --- /dev/null +++ b/acceptance/bundle/override/job_cluster/output.txt @@ -0,0 +1,56 @@ + +>>> $CLI bundle validate -o json -t development +{ + "foo": { + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/override_job_cluster/development/state/metadata.json" + }, + "edit_mode": "UI_LOCKED", + "format": "MULTI_TASK", + "job_clusters": [ + { + "job_cluster_key": "key", + "new_cluster": { + "node_type_id": "i3.xlarge", + "num_workers": 1, + "spark_version": "13.3.x-scala2.12" + } + } + ], + "name": "job", + "permissions": [], + "queue": { + "enabled": true + }, + "tags": {} + } +} + +>>> $CLI bundle validate -o json -t staging +{ + "foo": { + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/override_job_cluster/staging/state/metadata.json" + }, + "edit_mode": "UI_LOCKED", + "format": "MULTI_TASK", + "job_clusters": [ + { + "job_cluster_key": "key", + "new_cluster": { + "node_type_id": "i3.2xlarge", + "num_workers": 4, + "spark_version": "13.3.x-scala2.12" + } + } + ], + "name": "job", + "permissions": [], + "queue": { + "enabled": true + }, + "tags": {} + } +} diff --git a/acceptance/bundle/override/job_cluster/script b/acceptance/bundle/override/job_cluster/script new file mode 100644 index 000000000..4a26c433a --- /dev/null +++ b/acceptance/bundle/override/job_cluster/script @@ -0,0 +1,2 @@ +trace $CLI bundle validate -o json -t development | jq '.resources.jobs' +trace $CLI bundle validate -o json -t staging | jq '.resources.jobs' diff --git a/acceptance/bundle/override/job_cluster_var/databricks.yml b/acceptance/bundle/override/job_cluster_var/databricks.yml new file mode 100644 index 000000000..546cc2d8a --- /dev/null +++ b/acceptance/bundle/override/job_cluster_var/databricks.yml @@ -0,0 +1,37 @@ +bundle: + name: override_job_cluster + +variables: + mykey: + default: key + +resources: + jobs: + foo: + name: job + job_clusters: + - job_cluster_key: key + new_cluster: + spark_version: 13.3.x-scala2.12 + +targets: + development: + resources: + jobs: + foo: + job_clusters: + # This does not work because merging is done before resolution + - job_cluster_key: "${var.mykey}" + new_cluster: + node_type_id: i3.xlarge + num_workers: 1 + + staging: + resources: + jobs: + foo: + job_clusters: + - job_cluster_key: "${var.mykey}" + new_cluster: + node_type_id: i3.2xlarge + num_workers: 4 diff --git a/acceptance/bundle/override/job_cluster_var/output.txt b/acceptance/bundle/override/job_cluster_var/output.txt new file mode 100644 
index 000000000..dee2a3b5b --- /dev/null +++ b/acceptance/bundle/override/job_cluster_var/output.txt @@ -0,0 +1,84 @@ + +>>> $CLI bundle validate -o json -t development +{ + "foo": { + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/override_job_cluster/development/state/metadata.json" + }, + "edit_mode": "UI_LOCKED", + "format": "MULTI_TASK", + "job_clusters": [ + { + "job_cluster_key": "key", + "new_cluster": { + "spark_version": "13.3.x-scala2.12" + } + }, + { + "job_cluster_key": "key", + "new_cluster": { + "node_type_id": "i3.xlarge", + "num_workers": 1 + } + } + ], + "name": "job", + "permissions": [], + "queue": { + "enabled": true + }, + "tags": {} + } +} + +>>> $CLI bundle validate -t development +Name: override_job_cluster +Target: development +Workspace: + User: tester@databricks.com + Path: /Workspace/Users/tester@databricks.com/.bundle/override_job_cluster/development + +Validation OK! + +>>> $CLI bundle validate -o json -t staging +{ + "foo": { + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/override_job_cluster/staging/state/metadata.json" + }, + "edit_mode": "UI_LOCKED", + "format": "MULTI_TASK", + "job_clusters": [ + { + "job_cluster_key": "key", + "new_cluster": { + "spark_version": "13.3.x-scala2.12" + } + }, + { + "job_cluster_key": "key", + "new_cluster": { + "node_type_id": "i3.2xlarge", + "num_workers": 4 + } + } + ], + "name": "job", + "permissions": [], + "queue": { + "enabled": true + }, + "tags": {} + } +} + +>>> $CLI bundle validate -t staging +Name: override_job_cluster +Target: staging +Workspace: + User: tester@databricks.com + Path: /Workspace/Users/tester@databricks.com/.bundle/override_job_cluster/staging + +Validation OK! 
diff --git a/acceptance/bundle/override/job_cluster_var/script b/acceptance/bundle/override/job_cluster_var/script new file mode 100644 index 000000000..1cf373828 --- /dev/null +++ b/acceptance/bundle/override/job_cluster_var/script @@ -0,0 +1,4 @@ +trace $CLI bundle validate -o json -t development | jq '.resources.jobs' +trace $CLI bundle validate -t development +trace $CLI bundle validate -o json -t staging | jq '.resources.jobs' +trace $CLI bundle validate -t staging diff --git a/bundle/tests/override_job_tasks/databricks.yml b/acceptance/bundle/override/job_tasks/databricks.yml similarity index 94% rename from bundle/tests/override_job_tasks/databricks.yml rename to acceptance/bundle/override/job_tasks/databricks.yml index ddee28793..fd7edafb9 100644 --- a/bundle/tests/override_job_tasks/databricks.yml +++ b/acceptance/bundle/override/job_tasks/databricks.yml @@ -1,9 +1,6 @@ bundle: name: override_job_tasks -workspace: - host: https://acme.cloud.databricks.com/ - resources: jobs: foo: diff --git a/acceptance/bundle/override/job_tasks/output.txt b/acceptance/bundle/override/job_tasks/output.txt new file mode 100644 index 000000000..0d561291e --- /dev/null +++ b/acceptance/bundle/override/job_tasks/output.txt @@ -0,0 +1,68 @@ + +>>> errcode $CLI bundle validate -o json -t development +Error: file ./test1.py not found + +Exit code: 1 +{ + "name": "job", + "queue": { + "enabled": true + }, + "tags": {}, + "tasks": [ + { + "new_cluster": { + "node_type_id": "i3.xlarge", + "num_workers": 1, + "spark_version": "13.3.x-scala2.12" + }, + "spark_python_task": { + "python_file": "./test1.py" + }, + "task_key": "key1" + }, + { + "new_cluster": { + "spark_version": "13.3.x-scala2.12" + }, + "spark_python_task": { + "python_file": "./test2.py" + }, + "task_key": "key2" + } + ] +} + +>>> errcode $CLI bundle validate -o json -t staging +Error: file ./test1.py not found + +Exit code: 1 +{ + "name": "job", + "queue": { + "enabled": true + }, + "tags": {}, + "tasks": [ + { + "new_cluster": { + "spark_version": "13.3.x-scala2.12" + }, + "spark_python_task": { + "python_file": "./test1.py" + }, + "task_key": "key1" + }, + { + "new_cluster": { + "node_type_id": "i3.2xlarge", + "num_workers": 4, + "spark_version": "13.3.x-scala2.12" + }, + "spark_python_task": { + "python_file": "./test3.py" + }, + "task_key": "key2" + } + ] +} diff --git a/acceptance/bundle/override/job_tasks/script b/acceptance/bundle/override/job_tasks/script new file mode 100644 index 000000000..4e0869857 --- /dev/null +++ b/acceptance/bundle/override/job_tasks/script @@ -0,0 +1,2 @@ +trace errcode $CLI bundle validate -o json -t development | jq .resources.jobs.foo +trace errcode $CLI bundle validate -o json -t staging | jq .resources.jobs.foo diff --git a/acceptance/bundle/override/merge-string-map/databricks.yml b/acceptance/bundle/override/merge-string-map/databricks.yml new file mode 100644 index 000000000..5e443ceca --- /dev/null +++ b/acceptance/bundle/override/merge-string-map/databricks.yml @@ -0,0 +1,13 @@ +bundle: + name: merge-string-map + +resources: + clusters: + my_cluster: "hello" + +targets: + dev: + resources: + clusters: + my_cluster: + spark_version: "25" diff --git a/acceptance/bundle/override/merge-string-map/output.txt b/acceptance/bundle/override/merge-string-map/output.txt new file mode 100644 index 000000000..e1bd7dfb4 --- /dev/null +++ b/acceptance/bundle/override/merge-string-map/output.txt @@ -0,0 +1,23 @@ + +>>> $CLI bundle validate -o json -t dev +{ + "clusters": { + "my_cluster": { + "custom_tags": {}, + 
"spark_version": "25" + } + } +} + +>>> $CLI bundle validate -t dev +Warning: expected map, found string + at resources.clusters.my_cluster + in databricks.yml:6:17 + +Name: merge-string-map +Target: dev +Workspace: + User: tester@databricks.com + Path: /Workspace/Users/tester@databricks.com/.bundle/merge-string-map/dev + +Found 1 warning diff --git a/acceptance/bundle/override/merge-string-map/script b/acceptance/bundle/override/merge-string-map/script new file mode 100644 index 000000000..a109d5f69 --- /dev/null +++ b/acceptance/bundle/override/merge-string-map/script @@ -0,0 +1,2 @@ +trace $CLI bundle validate -o json -t dev | jq .resources +trace $CLI bundle validate -t dev diff --git a/bundle/tests/override_pipeline_cluster/databricks.yml b/acceptance/bundle/override/pipeline_cluster/databricks.yml similarity index 90% rename from bundle/tests/override_pipeline_cluster/databricks.yml rename to acceptance/bundle/override/pipeline_cluster/databricks.yml index 8930f30e8..8b4857460 100644 --- a/bundle/tests/override_pipeline_cluster/databricks.yml +++ b/acceptance/bundle/override/pipeline_cluster/databricks.yml @@ -1,9 +1,6 @@ bundle: name: override_pipeline_cluster -workspace: - host: https://acme.cloud.databricks.com/ - resources: pipelines: foo: diff --git a/acceptance/bundle/override/pipeline_cluster/output.txt b/acceptance/bundle/override/pipeline_cluster/output.txt new file mode 100644 index 000000000..81bf58180 --- /dev/null +++ b/acceptance/bundle/override/pipeline_cluster/output.txt @@ -0,0 +1,44 @@ + +>>> $CLI bundle validate -o json -t development +{ + "foo": { + "clusters": [ + { + "label": "default", + "node_type_id": "i3.xlarge", + "num_workers": 1, + "spark_conf": { + "foo": "bar" + } + } + ], + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/override_pipeline_cluster/development/state/metadata.json" + }, + "name": "job", + "permissions": [] + } +} + +>>> $CLI bundle validate -o json -t staging +{ + "foo": { + "clusters": [ + { + "label": "default", + "node_type_id": "i3.2xlarge", + "num_workers": 4, + "spark_conf": { + "foo": "bar" + } + } + ], + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/override_pipeline_cluster/staging/state/metadata.json" + }, + "name": "job", + "permissions": [] + } +} diff --git a/acceptance/bundle/override/pipeline_cluster/script b/acceptance/bundle/override/pipeline_cluster/script new file mode 100644 index 000000000..b06005ce5 --- /dev/null +++ b/acceptance/bundle/override/pipeline_cluster/script @@ -0,0 +1,2 @@ +trace $CLI bundle validate -o json -t development | jq .resources.pipelines +trace $CLI bundle validate -o json -t staging | jq .resources.pipelines diff --git a/acceptance/bundle/variables/complex-transitive/databricks.yml b/acceptance/bundle/variables/complex-transitive/databricks.yml new file mode 100644 index 000000000..9ef4e6386 --- /dev/null +++ b/acceptance/bundle/variables/complex-transitive/databricks.yml @@ -0,0 +1,19 @@ +bundle: + name: complex-transitive + +variables: + catalog: + default: hive_metastore + spark_conf: + default: + "spark.databricks.sql.initial.catalog.name": ${var.catalog} + etl_cluster_config: + type: complex + default: + spark_version: 14.3.x-scala2.12 + runtime_engine: PHOTON + spark_conf: ${var.spark_conf} + +resources: + clusters: + my_cluster: ${var.etl_cluster_config} diff --git a/acceptance/bundle/variables/complex-transitive/output.txt 
b/acceptance/bundle/variables/complex-transitive/output.txt new file mode 100644 index 000000000..a031e0497 --- /dev/null +++ b/acceptance/bundle/variables/complex-transitive/output.txt @@ -0,0 +1,3 @@ +{ + "spark.databricks.sql.initial.catalog.name": "${var.catalog}" +} diff --git a/acceptance/bundle/variables/complex-transitive/script b/acceptance/bundle/variables/complex-transitive/script new file mode 100644 index 000000000..52bb08ed4 --- /dev/null +++ b/acceptance/bundle/variables/complex-transitive/script @@ -0,0 +1,2 @@ +# Currently, this incorrectly outputs variable reference instead of resolved value +$CLI bundle validate -o json | jq '.resources.clusters.my_cluster.spark_conf' diff --git a/bundle/tests/variables/complex/databricks.yml b/acceptance/bundle/variables/complex/databricks.yml similarity index 82% rename from bundle/tests/variables/complex/databricks.yml rename to acceptance/bundle/variables/complex/databricks.yml index 3b32a7c8e..5dcc30b08 100644 --- a/bundle/tests/variables/complex/databricks.yml +++ b/acceptance/bundle/variables/complex/databricks.yml @@ -11,6 +11,7 @@ resources: - task_key: test job_cluster_key: key libraries: ${variables.libraries.value} + # specific fields of complex variable are referenced: task_key: "task with spark version ${var.cluster.spark_version} and jar ${var.libraries[0].jar}" variables: @@ -35,30 +36,21 @@ variables: - jar: "/path/to/jar" - egg: "/path/to/egg" - whl: "/path/to/whl" - complexvar: - type: complex - description: "A complex variable" - default: - key1: "value1" - key2: "value2" - key3: "value3" - targets: default: + default: true dev: variables: node_type: "Standard_DS3_v3" cluster: + # complex variables are not merged, so missing variables (policy_id) are not inherited spark_version: "14.2.x-scala2.11" node_type_id: ${var.node_type} num_workers: 4 spark_conf: spark.speculation: false spark.databricks.delta.retentionDurationCheck.enabled: false - complexvar: - type: complex - default: - key1: "1" - key2: "2" - key3: "3" + libraries: + - jar: "/newpath/to/jar" + - whl: "/newpath/to/whl" diff --git a/acceptance/bundle/variables/complex/out.default.json b/acceptance/bundle/variables/complex/out.default.json new file mode 100644 index 000000000..6454562a6 --- /dev/null +++ b/acceptance/bundle/variables/complex/out.default.json @@ -0,0 +1,110 @@ +{ + "resources": { + "jobs": { + "my_job": { + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/complex-variables/default/state/metadata.json" + }, + "edit_mode": "UI_LOCKED", + "format": "MULTI_TASK", + "job_clusters": [ + { + "job_cluster_key": "key", + "new_cluster": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 2, + "policy_id": "some-policy-id", + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": "false", + "spark.random": "true", + "spark.speculation": "true" + }, + "spark_version": "13.2.x-scala2.11" + } + } + ], + "permissions": [], + "queue": { + "enabled": true + }, + "tags": {}, + "tasks": [ + { + "job_cluster_key": "key", + "libraries": [ + { + "jar": "/path/to/jar" + }, + { + "egg": "/path/to/egg" + }, + { + "whl": "/path/to/whl" + } + ], + "task_key": "task with spark version 13.2.x-scala2.11 and jar /path/to/jar" + } + ] + } + } + }, + "variables": { + "cluster": { + "default": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 2, + "policy_id": "some-policy-id", + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.random": true, + 
"spark.speculation": true + }, + "spark_version": "13.2.x-scala2.11" + }, + "description": "A cluster definition", + "type": "complex", + "value": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 2, + "policy_id": "some-policy-id", + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.random": true, + "spark.speculation": true + }, + "spark_version": "13.2.x-scala2.11" + } + }, + "libraries": { + "default": [ + { + "jar": "/path/to/jar" + }, + { + "egg": "/path/to/egg" + }, + { + "whl": "/path/to/whl" + } + ], + "description": "A libraries definition", + "type": "complex", + "value": [ + { + "jar": "/path/to/jar" + }, + { + "egg": "/path/to/egg" + }, + { + "whl": "/path/to/whl" + } + ] + }, + "node_type": { + "default": "Standard_DS3_v2", + "value": "Standard_DS3_v2" + } + } +} diff --git a/acceptance/bundle/variables/complex/out.dev.json b/acceptance/bundle/variables/complex/out.dev.json new file mode 100644 index 000000000..cede5feb2 --- /dev/null +++ b/acceptance/bundle/variables/complex/out.dev.json @@ -0,0 +1,95 @@ +{ + "resources": { + "jobs": { + "my_job": { + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/complex-variables/dev/state/metadata.json" + }, + "edit_mode": "UI_LOCKED", + "format": "MULTI_TASK", + "job_clusters": [ + { + "job_cluster_key": "key", + "new_cluster": { + "node_type_id": "Standard_DS3_v3", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": "false", + "spark.speculation": "false" + }, + "spark_version": "14.2.x-scala2.11" + } + } + ], + "permissions": [], + "queue": { + "enabled": true + }, + "tags": {}, + "tasks": [ + { + "job_cluster_key": "key", + "libraries": [ + { + "jar": "/newpath/to/jar" + }, + { + "whl": "/newpath/to/whl" + } + ], + "task_key": "task with spark version 14.2.x-scala2.11 and jar /newpath/to/jar" + } + ] + } + } + }, + "variables": { + "cluster": { + "default": { + "node_type_id": "Standard_DS3_v3", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.speculation": false + }, + "spark_version": "14.2.x-scala2.11" + }, + "description": "A cluster definition", + "type": "complex", + "value": { + "node_type_id": "Standard_DS3_v3", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.speculation": false + }, + "spark_version": "14.2.x-scala2.11" + } + }, + "libraries": { + "default": [ + { + "jar": "/newpath/to/jar" + }, + { + "whl": "/newpath/to/whl" + } + ], + "description": "A libraries definition", + "type": "complex", + "value": [ + { + "jar": "/newpath/to/jar" + }, + { + "whl": "/newpath/to/whl" + } + ] + }, + "node_type": { + "default": "Standard_DS3_v3", + "value": "Standard_DS3_v3" + } + } +} diff --git a/acceptance/bundle/variables/complex/output.txt b/acceptance/bundle/variables/complex/output.txt new file mode 100644 index 000000000..ce295421f --- /dev/null +++ b/acceptance/bundle/variables/complex/output.txt @@ -0,0 +1,14 @@ + +>>> $CLI bundle validate -o json + +>>> jq .resources.jobs.my_job.tasks[0].task_key out.default.json +"task with spark version 13.2.x-scala2.11 and jar /path/to/jar" + +>>> $CLI bundle validate -o json -t dev + +>>> jq .resources.jobs.my_job.tasks[0].task_key out.dev.json +"task with spark version 14.2.x-scala2.11 and jar /newpath/to/jar" +policy_id and spark_conf.spark_random fields do not exist in dev target: + +>>> jq 
.resources.jobs.my_job.job_clusters[0].new_cluster.policy_id out.dev.json +null diff --git a/acceptance/bundle/variables/complex/script b/acceptance/bundle/variables/complex/script new file mode 100644 index 000000000..f8b61f18d --- /dev/null +++ b/acceptance/bundle/variables/complex/script @@ -0,0 +1,8 @@ +trace $CLI bundle validate -o json | jq '{resources,variables}' > out.default.json +trace jq .resources.jobs.my_job.tasks[0].task_key out.default.json | grep "task with spark version 13.2.x-scala2.11 and jar /path/to/jar" + +trace $CLI bundle validate -o json -t dev | jq '{resources,variables}' > out.dev.json +trace jq .resources.jobs.my_job.tasks[0].task_key out.dev.json | grep "task with spark version 14.2.x-scala2.11 and jar /newpath/to/jar" + +echo policy_id and spark_conf.spark_random fields do not exist in dev target: +trace jq .resources.jobs.my_job.job_clusters[0].new_cluster.policy_id out.dev.json | grep null diff --git a/bundle/tests/variables/complex_multiple_files/databricks.yml b/acceptance/bundle/variables/complex_multiple_files/databricks.yml similarity index 100% rename from bundle/tests/variables/complex_multiple_files/databricks.yml rename to acceptance/bundle/variables/complex_multiple_files/databricks.yml diff --git a/acceptance/bundle/variables/complex_multiple_files/output.txt b/acceptance/bundle/variables/complex_multiple_files/output.txt new file mode 100644 index 000000000..e87b8df11 --- /dev/null +++ b/acceptance/bundle/variables/complex_multiple_files/output.txt @@ -0,0 +1,159 @@ +{ + "resources": { + "jobs": { + "my_job": { + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/complex-variables-multiple-files/dev/state/metadata.json" + }, + "edit_mode": "UI_LOCKED", + "format": "MULTI_TASK", + "job_clusters": [ + { + "job_cluster_key": "key1", + "new_cluster": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": "false", + "spark.speculation": "false" + }, + "spark_version": "14.2.x-scala2.11" + } + }, + { + "job_cluster_key": "key2", + "new_cluster": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": "false", + "spark.speculation": "false" + }, + "spark_version": "14.2.x-scala2.11" + } + }, + { + "job_cluster_key": "key3", + "new_cluster": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": "false", + "spark.speculation": "false" + }, + "spark_version": "14.2.x-scala2.11" + } + }, + { + "job_cluster_key": "key4", + "new_cluster": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": "false", + "spark.speculation": "false" + }, + "spark_version": "14.2.x-scala2.11" + } + } + ], + "permissions": [], + "queue": { + "enabled": true + }, + "tags": {} + } + } + }, + "variables": { + "cluster1": { + "default": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.speculation": false + }, + "spark_version": "14.2.x-scala2.11" + }, + "description": "A cluster definition", + "type": "complex", + "value": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.speculation": false + }, + 
"spark_version": "14.2.x-scala2.11" + } + }, + "cluster2": { + "default": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.speculation": false + }, + "spark_version": "14.2.x-scala2.11" + }, + "description": "A cluster definition", + "type": "complex", + "value": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.speculation": false + }, + "spark_version": "14.2.x-scala2.11" + } + }, + "cluster3": { + "default": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.speculation": false + }, + "spark_version": "14.2.x-scala2.11" + }, + "description": "A cluster definition", + "type": "complex", + "value": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.speculation": false + }, + "spark_version": "14.2.x-scala2.11" + } + }, + "cluster4": { + "default": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.speculation": false + }, + "spark_version": "14.2.x-scala2.11" + }, + "description": "A cluster definition", + "type": "complex", + "value": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.speculation": false + }, + "spark_version": "14.2.x-scala2.11" + } + } + } +} diff --git a/acceptance/bundle/variables/complex_multiple_files/script b/acceptance/bundle/variables/complex_multiple_files/script new file mode 100644 index 000000000..24f1d58d5 --- /dev/null +++ b/acceptance/bundle/variables/complex_multiple_files/script @@ -0,0 +1 @@ +$CLI bundle validate -t dev -o json | jq '{resources, variables}' diff --git a/bundle/tests/variables/complex_multiple_files/variables/clusters.yml b/acceptance/bundle/variables/complex_multiple_files/variables/clusters.yml similarity index 100% rename from bundle/tests/variables/complex_multiple_files/variables/clusters.yml rename to acceptance/bundle/variables/complex_multiple_files/variables/clusters.yml diff --git a/bundle/tests/variables/empty/databricks.yml b/acceptance/bundle/variables/empty/databricks.yml similarity index 100% rename from bundle/tests/variables/empty/databricks.yml rename to acceptance/bundle/variables/empty/databricks.yml diff --git a/acceptance/bundle/variables/empty/output.txt b/acceptance/bundle/variables/empty/output.txt new file mode 100644 index 000000000..c3f0af130 --- /dev/null +++ b/acceptance/bundle/variables/empty/output.txt @@ -0,0 +1,11 @@ +Error: no value assigned to required variable a. 
Assignment can be done through the "--var" flag or by setting the BUNDLE_VAR_a environment variable + +Name: empty${var.a} +Target: default +Workspace: + User: tester@databricks.com + Path: /Workspace/Users/tester@databricks.com/.bundle/empty${var.a}/default + +Found 1 error + +Exit code: 1 diff --git a/acceptance/bundle/variables/empty/script b/acceptance/bundle/variables/empty/script new file mode 100644 index 000000000..72555b332 --- /dev/null +++ b/acceptance/bundle/variables/empty/script @@ -0,0 +1 @@ +$CLI bundle validate diff --git a/bundle/tests/variables/env_overrides/databricks.yml b/acceptance/bundle/variables/env_overrides/databricks.yml similarity index 100% rename from bundle/tests/variables/env_overrides/databricks.yml rename to acceptance/bundle/variables/env_overrides/databricks.yml diff --git a/acceptance/bundle/variables/env_overrides/output.txt b/acceptance/bundle/variables/env_overrides/output.txt new file mode 100644 index 000000000..e8fb99938 --- /dev/null +++ b/acceptance/bundle/variables/env_overrides/output.txt @@ -0,0 +1,40 @@ + +>>> $CLI bundle validate -t env-with-single-variable-override -o json +"default-a dev-b" + +>>> $CLI bundle validate -t env-with-two-variable-overrides -o json +"prod-a prod-b" + +>>> BUNDLE_VAR_b=env-var-b $CLI bundle validate -t env-with-two-variable-overrides -o json +"prod-a env-var-b" + +>>> errcode $CLI bundle validate -t env-missing-a-required-variable-assignment +Error: no value assigned to required variable b. Assignment can be done through the "--var" flag or by setting the BUNDLE_VAR_b environment variable + +Name: test bundle +Target: env-missing-a-required-variable-assignment +Workspace: + User: tester@databricks.com + Path: /Workspace/Users/tester@databricks.com/.bundle/test bundle/env-missing-a-required-variable-assignment + +Found 1 error + +Exit code: 1 + +>>> errcode $CLI bundle validate -t env-using-an-undefined-variable +Error: variable c is not defined but is assigned a value + +Name: test bundle + +Found 1 error + +Exit code: 1 + +>>> $CLI bundle validate -t env-overrides-lookup -o json +{ + "a": "default-a", + "b": "prod-b", + "d": "4321", + "e": "1234", + "f": "9876" +} diff --git a/acceptance/bundle/variables/env_overrides/script b/acceptance/bundle/variables/env_overrides/script new file mode 100644 index 000000000..30919fd8a --- /dev/null +++ b/acceptance/bundle/variables/env_overrides/script @@ -0,0 +1,6 @@ +trace $CLI bundle validate -t env-with-single-variable-override -o json | jq .workspace.profile +trace $CLI bundle validate -t env-with-two-variable-overrides -o json | jq .workspace.profile +trace BUNDLE_VAR_b=env-var-b $CLI bundle validate -t env-with-two-variable-overrides -o json | jq .workspace.profile +trace errcode $CLI bundle validate -t env-missing-a-required-variable-assignment +trace errcode $CLI bundle validate -t env-using-an-undefined-variable +trace $CLI bundle validate -t env-overrides-lookup -o json | jq '.variables | map_values(.value)' diff --git a/acceptance/bundle/variables/resolve-builtin/databricks.yml b/acceptance/bundle/variables/resolve-builtin/databricks.yml new file mode 100644 index 000000000..4bb71c8db --- /dev/null +++ b/acceptance/bundle/variables/resolve-builtin/databricks.yml @@ -0,0 +1,6 @@ +bundle: + name: TestResolveVariableReferences + +workspace: + root_path: "${bundle.name}/bar" + file_path: "${workspace.root_path}/baz" diff --git a/acceptance/bundle/variables/resolve-builtin/output.txt b/acceptance/bundle/variables/resolve-builtin/output.txt new file mode 100644 
index 000000000..2f58abd8a --- /dev/null +++ b/acceptance/bundle/variables/resolve-builtin/output.txt @@ -0,0 +1,11 @@ +{ + "artifact_path": "TestResolveVariableReferences/bar/artifacts", + "current_user": { + "short_name": "tester", + "userName": "tester@databricks.com" + }, + "file_path": "TestResolveVariableReferences/bar/baz", + "resource_path": "TestResolveVariableReferences/bar/resources", + "root_path": "TestResolveVariableReferences/bar", + "state_path": "TestResolveVariableReferences/bar/state" +} diff --git a/acceptance/bundle/variables/resolve-builtin/script b/acceptance/bundle/variables/resolve-builtin/script new file mode 100644 index 000000000..fefd9abe6 --- /dev/null +++ b/acceptance/bundle/variables/resolve-builtin/script @@ -0,0 +1 @@ +$CLI bundle validate -o json | jq .workspace diff --git a/acceptance/bundle/variables/resolve-empty/databricks.yml b/acceptance/bundle/variables/resolve-empty/databricks.yml new file mode 100644 index 000000000..7563ada34 --- /dev/null +++ b/acceptance/bundle/variables/resolve-empty/databricks.yml @@ -0,0 +1,10 @@ +bundle: + name: TestResolveVariableReferencesToEmptyFields + git: + branch: "" + +resources: + jobs: + job1: + tags: + git_branch: "${bundle.git.branch}" diff --git a/acceptance/bundle/variables/resolve-empty/output.txt b/acceptance/bundle/variables/resolve-empty/output.txt new file mode 100644 index 000000000..a05cbbf54 --- /dev/null +++ b/acceptance/bundle/variables/resolve-empty/output.txt @@ -0,0 +1,3 @@ +{ + "git_branch": "" +} diff --git a/acceptance/bundle/variables/resolve-empty/script b/acceptance/bundle/variables/resolve-empty/script new file mode 100644 index 000000000..614673054 --- /dev/null +++ b/acceptance/bundle/variables/resolve-empty/script @@ -0,0 +1 @@ +$CLI bundle validate -o json | jq .resources.jobs.job1.tags diff --git a/acceptance/bundle/variables/resolve-field-within-complex/databricks.yml b/acceptance/bundle/variables/resolve-field-within-complex/databricks.yml new file mode 100644 index 000000000..7250dd5df --- /dev/null +++ b/acceptance/bundle/variables/resolve-field-within-complex/databricks.yml @@ -0,0 +1,16 @@ +bundle: + name: TestResolveComplexVariableReferencesToFields + +variables: + cluster: + type: "complex" + default: + node_type_id: "Standard_DS3_v2" + num_workers: 2 + +resources: + jobs: + job1: + job_clusters: + - new_cluster: + node_type_id: "${var.cluster.node_type_id}" diff --git a/acceptance/bundle/variables/resolve-field-within-complex/output.txt b/acceptance/bundle/variables/resolve-field-within-complex/output.txt new file mode 100644 index 000000000..1f6bdbbf4 --- /dev/null +++ b/acceptance/bundle/variables/resolve-field-within-complex/output.txt @@ -0,0 +1,3 @@ +{ + "node_type_id": "Standard_DS3_v2" +} diff --git a/acceptance/bundle/variables/resolve-field-within-complex/script b/acceptance/bundle/variables/resolve-field-within-complex/script new file mode 100644 index 000000000..a885870a5 --- /dev/null +++ b/acceptance/bundle/variables/resolve-field-within-complex/script @@ -0,0 +1 @@ +$CLI bundle validate -o json | jq .resources.jobs.job1.job_clusters[0].new_cluster diff --git a/bundle/tests/variables/vanilla/databricks.yml b/acceptance/bundle/variables/vanilla/databricks.yml similarity index 100% rename from bundle/tests/variables/vanilla/databricks.yml rename to acceptance/bundle/variables/vanilla/databricks.yml diff --git a/acceptance/bundle/variables/vanilla/output.txt b/acceptance/bundle/variables/vanilla/output.txt new file mode 100644 index 000000000..69b358a3f --- 
/dev/null +++ b/acceptance/bundle/variables/vanilla/output.txt @@ -0,0 +1,16 @@ + +>>> BUNDLE_VAR_b=def $CLI bundle validate -o json +"abc def" + +>>> errcode $CLI bundle validate +Error: no value assigned to required variable b. Assignment can be done through the "--var" flag or by setting the BUNDLE_VAR_b environment variable + +Name: ${var.a} ${var.b} +Target: default +Workspace: + User: tester@databricks.com + Path: /Workspace/Users/tester@databricks.com/.bundle/${var.a} ${var.b}/default + +Found 1 error + +Exit code: 1 diff --git a/acceptance/bundle/variables/vanilla/script b/acceptance/bundle/variables/vanilla/script new file mode 100644 index 000000000..10da3183d --- /dev/null +++ b/acceptance/bundle/variables/vanilla/script @@ -0,0 +1,2 @@ +trace BUNDLE_VAR_b=def $CLI bundle validate -o json | jq .bundle.name +trace errcode $CLI bundle validate diff --git a/bundle/tests/variables/variable_overrides_in_target/databricks.yml b/acceptance/bundle/variables/variable_overrides_in_target/databricks.yml similarity index 100% rename from bundle/tests/variables/variable_overrides_in_target/databricks.yml rename to acceptance/bundle/variables/variable_overrides_in_target/databricks.yml diff --git a/acceptance/bundle/variables/variable_overrides_in_target/output.txt b/acceptance/bundle/variables/variable_overrides_in_target/output.txt new file mode 100644 index 000000000..de193f5b6 --- /dev/null +++ b/acceptance/bundle/variables/variable_overrides_in_target/output.txt @@ -0,0 +1,84 @@ + +>>> $CLI bundle validate -o json -t use-default-variable-values +{ + "pipelines": { + "my_pipeline": { + "clusters": [ + { + "label": "default", + "num_workers": 42 + } + ], + "continuous": true, + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/foobar/use-default-variable-values/state/metadata.json" + }, + "name": "a_string", + "permissions": [] + } + } +} + +>>> $CLI bundle validate -o json -t override-string-variable +{ + "pipelines": { + "my_pipeline": { + "clusters": [ + { + "label": "default", + "num_workers": 42 + } + ], + "continuous": true, + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/foobar/override-string-variable/state/metadata.json" + }, + "name": "overridden_string", + "permissions": [] + } + } +} + +>>> $CLI bundle validate -o json -t override-int-variable +{ + "pipelines": { + "my_pipeline": { + "clusters": [ + { + "label": "default", + "num_workers": 43 + } + ], + "continuous": true, + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/foobar/override-int-variable/state/metadata.json" + }, + "name": "a_string", + "permissions": [] + } + } +} + +>>> $CLI bundle validate -o json -t override-both-bool-and-string-variables +{ + "pipelines": { + "my_pipeline": { + "clusters": [ + { + "label": "default", + "num_workers": 42 + } + ], + "continuous": false, + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/foobar/override-both-bool-and-string-variables/state/metadata.json" + }, + "name": "overridden_string", + "permissions": [] + } + } +} diff --git a/acceptance/bundle/variables/variable_overrides_in_target/script b/acceptance/bundle/variables/variable_overrides_in_target/script new file mode 100644 index 000000000..686b3102a --- /dev/null +++ b/acceptance/bundle/variables/variable_overrides_in_target/script @@ -0,0 +1,4 @@ +trace $CLI bundle validate -o json -t 
use-default-variable-values | jq .resources +trace $CLI bundle validate -o json -t override-string-variable | jq .resources +trace $CLI bundle validate -o json -t override-int-variable | jq .resources +trace $CLI bundle validate -o json -t override-both-bool-and-string-variables | jq .resources diff --git a/bundle/tests/variables/without_definition/databricks.yml b/acceptance/bundle/variables/without_definition/databricks.yml similarity index 53% rename from bundle/tests/variables/without_definition/databricks.yml rename to acceptance/bundle/variables/without_definition/databricks.yml index 68227b683..c26a85f56 100644 --- a/bundle/tests/variables/without_definition/databricks.yml +++ b/acceptance/bundle/variables/without_definition/databricks.yml @@ -1,3 +1,5 @@ +bundle: + name: x variables: a: b: diff --git a/acceptance/bundle/variables/without_definition/output.txt b/acceptance/bundle/variables/without_definition/output.txt new file mode 100644 index 000000000..4dd1e6609 --- /dev/null +++ b/acceptance/bundle/variables/without_definition/output.txt @@ -0,0 +1,4 @@ +{ + "a": "foo", + "b": "bar" +} diff --git a/acceptance/bundle/variables/without_definition/script b/acceptance/bundle/variables/without_definition/script new file mode 100644 index 000000000..49b9b5448 --- /dev/null +++ b/acceptance/bundle/variables/without_definition/script @@ -0,0 +1 @@ +BUNDLE_VAR_a=foo BUNDLE_VAR_b=bar $CLI bundle validate -o json | jq '.variables | map_values(.value)' diff --git a/acceptance/help/output.txt b/acceptance/help/output.txt new file mode 100644 index 000000000..ed4a88ce6 --- /dev/null +++ b/acceptance/help/output.txt @@ -0,0 +1,143 @@ +Databricks CLI + +Usage: + databricks [command] + +Databricks Workspace + fs Filesystem related commands + git-credentials Registers personal access token for Databricks to do operations on behalf of the user. + repos The Repos API allows users to manage their git repos. + secrets The Secrets API allows you to manage secrets, secret scopes, and access permissions. + workspace The Workspace API allows you to list, import, export, and delete notebooks and folders. + +Compute + cluster-policies You can use cluster policies to control users' ability to configure clusters based on a set of rules. + clusters The Clusters API allows you to create, start, edit, list, terminate, and delete clusters. + global-init-scripts The Global Init Scripts API enables Workspace administrators to configure global initialization scripts for their workspace. + instance-pools Instance Pools API are used to create, edit, delete and list instance pools by using ready-to-use cloud instances which reduces a cluster start and auto-scaling times. + instance-profiles The Instance Profiles API allows admins to add, list, and remove instance profiles that users can launch clusters with. + libraries The Libraries API allows you to install and uninstall libraries and get the status of libraries on a cluster. + policy-compliance-for-clusters The policy compliance APIs allow you to view and manage the policy compliance status of clusters in your workspace. + policy-families View available policy families. + +Workflows + jobs The Jobs API allows you to create, edit, and delete jobs. + policy-compliance-for-jobs The compliance APIs allow you to view and manage the policy compliance status of jobs in your workspace. + +Delta Live Tables + pipelines The Delta Live Tables API allows you to create, edit, delete, start, and view details about pipelines. 
+ +Machine Learning + experiments Experiments are the primary unit of organization in MLflow; all MLflow runs belong to an experiment. + model-registry Note: This API reference documents APIs for the Workspace Model Registry. + +Real-time Serving + serving-endpoints The Serving Endpoints API allows you to create, update, and delete model serving endpoints. + +Identity and Access Management + current-user This API allows retrieving information about currently authenticated user or service principal. + groups Groups simplify identity management, making it easier to assign access to Databricks workspace, data, and other securable objects. + permissions Permissions API are used to create read, write, edit, update and manage access for various users on different objects and endpoints. + service-principals Identities for use with jobs, automated tools, and systems such as scripts, apps, and CI/CD platforms. + users User identities recognized by Databricks and represented by email addresses. + +Databricks SQL + alerts The alerts API can be used to perform CRUD operations on alerts. + alerts-legacy The alerts API can be used to perform CRUD operations on alerts. + dashboards In general, there is little need to modify dashboards using the API. + data-sources This API is provided to assist you in making new query objects. + queries The queries API can be used to perform CRUD operations on queries. + queries-legacy These endpoints are used for CRUD operations on query definitions. + query-history A service responsible for storing and retrieving the list of queries run against SQL endpoints and serverless compute. + warehouses A SQL warehouse is a compute resource that lets you run SQL commands on data objects within Databricks SQL. + +Unity Catalog + artifact-allowlists In Databricks Runtime 13.3 and above, you can add libraries and init scripts to the allowlist in UC so that users can leverage these artifacts on compute configured with shared access mode. + catalogs A catalog is the first layer of Unity Catalog’s three-level namespace. + connections Connections allow for creating a connection to an external data source. + credentials A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant. + external-locations An external location is an object that combines a cloud storage path with a storage credential that authorizes access to the cloud storage path. + functions Functions implement User-Defined Functions (UDFs) in Unity Catalog. + grants In Unity Catalog, data is secure by default. + metastores A metastore is the top-level container of objects in Unity Catalog. + model-versions Databricks provides a hosted version of MLflow Model Registry in Unity Catalog. + online-tables Online tables provide lower latency and higher QPS access to data from Delta tables. + quality-monitors A monitor computes and monitors data or model quality metrics for a table over time. + registered-models Databricks provides a hosted version of MLflow Model Registry in Unity Catalog. + resource-quotas Unity Catalog enforces resource quotas on all securable objects, which limits the number of resources that can be created. + schemas A schema (also called a database) is the second layer of Unity Catalog’s three-level namespace. + storage-credentials A storage credential represents an authentication and authorization mechanism for accessing data stored on your cloud tenant. + system-schemas A system schema is a schema that lives within the system catalog. 
+ table-constraints Primary key and foreign key constraints encode relationships between fields in tables. + tables A table resides in the third layer of Unity Catalog’s three-level namespace. + temporary-table-credentials Temporary Table Credentials refer to short-lived, downscoped credentials used to access cloud storage locationswhere table data is stored in Databricks. + volumes Volumes are a Unity Catalog (UC) capability for accessing, storing, governing, organizing and processing files. + workspace-bindings A securable in Databricks can be configured as __OPEN__ or __ISOLATED__. + +Delta Sharing + providers A data provider is an object representing the organization in the real world who shares the data. + recipient-activation The Recipient Activation API is only applicable in the open sharing model where the recipient object has the authentication type of TOKEN. + recipients A recipient is an object you create using :method:recipients/create to represent an organization which you want to allow access shares. + shares A share is a container instantiated with :method:shares/create. + +Settings + ip-access-lists IP Access List enables admins to configure IP access lists. + notification-destinations The notification destinations API lets you programmatically manage a workspace's notification destinations. + settings Workspace Settings API allows users to manage settings at the workspace level. + token-management Enables administrators to get all tokens and delete tokens for other users. + tokens The Token API allows you to create, list, and revoke tokens that can be used to authenticate and access Databricks REST APIs. + workspace-conf This API allows updating known workspace settings for advanced users. + +Developer Tools + bundle Databricks Asset Bundles let you express data/AI/analytics projects as code. + sync Synchronize a local directory to a workspace directory + +Vector Search + vector-search-endpoints **Endpoint**: Represents the compute resources to host vector search indexes. + vector-search-indexes **Index**: An efficient representation of your embedding vectors that supports real-time and efficient approximate nearest neighbor (ANN) search queries. + +Dashboards + lakeview These APIs provide specific management operations for Lakeview dashboards. + +Marketplace + consumer-fulfillments Fulfillments are entities that allow consumers to preview installations. + consumer-installations Installations are entities that allow consumers to interact with Databricks Marketplace listings. + consumer-listings Listings are the core entities in the Marketplace. + consumer-personalization-requests Personalization Requests allow customers to interact with the individualized Marketplace listing flow. + consumer-providers Providers are the entities that publish listings to the Marketplace. + provider-exchange-filters Marketplace exchanges filters curate which groups can access an exchange. + provider-exchanges Marketplace exchanges allow providers to share their listings with a curated set of customers. + provider-files Marketplace offers a set of file APIs for various purposes such as preview notebooks and provider icons. + provider-listings Listings are the core entities in the Marketplace. + provider-personalization-requests Personalization requests are an alternate to instantly available listings. + provider-provider-analytics-dashboards Manage templated analytics solution for providers. + provider-providers Providers are entities that manage assets in Marketplace. 
+ +Apps + apps Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on. + apps Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on. + +Clean Rooms + clean-room-assets Clean room assets are data and code objects — Tables, volumes, and notebooks that are shared with the clean room. + clean-room-task-runs Clean room task runs are the executions of notebooks in a clean room. + clean-rooms A clean room uses Delta Sharing and serverless compute to provide a secure and privacy-protecting environment where multiple parties can work together on sensitive enterprise data without direct access to each other’s data. + +Additional Commands: + account Databricks Account Commands + api Perform Databricks API call + auth Authentication related commands + completion Generate the autocompletion script for the specified shell + configure Configure authentication + help Help about any command + labs Manage Databricks Labs installations + version Retrieve information about the current version of this CLI + +Flags: + --debug enable debug logging + -h, --help help for databricks + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + -v, --version version for databricks + +Use "databricks [command] --help" for more information about a command. diff --git a/acceptance/help/script b/acceptance/help/script new file mode 100644 index 000000000..5fa569470 --- /dev/null +++ b/acceptance/help/script @@ -0,0 +1 @@ +$CLI diff --git a/acceptance/script.cleanup b/acceptance/script.cleanup new file mode 100644 index 000000000..3c3e29ebc --- /dev/null +++ b/acceptance/script.cleanup @@ -0,0 +1 @@ +rm -fr .databricks .gitignore diff --git a/acceptance/script.prepare b/acceptance/script.prepare new file mode 100644 index 000000000..3f1bb2acc --- /dev/null +++ b/acceptance/script.prepare @@ -0,0 +1,36 @@ +# Prevent CLI from downloading terraform in each test: +export DATABRICKS_TF_EXEC_PATH=/tmp/ + +errcode() { + # Temporarily disable 'set -e' to prevent the script from exiting on error + set +e + # Execute the provided command with all arguments + "$@" + local exit_code=$? + # Re-enable 'set -e' if it was previously set + set -e + >&2 printf "\nExit code: $exit_code\n" +} + +trace() { + >&2 printf "\n>>> %s\n" "$*" + + if [[ "$1" == *"="* ]]; then + # If the first argument contains '=', collect all env vars + local env_vars=() + while [[ "$1" == *"="* ]]; do + env_vars+=("$1") + shift + done + # Export environment variables in a subshell and execute the command + ( + export "${env_vars[@]}" + "$@" + ) + else + # Execute the command normally + "$@" + fi + + return $? 
+} diff --git a/acceptance/server_test.go b/acceptance/server_test.go new file mode 100644 index 000000000..7b21e198f --- /dev/null +++ b/acceptance/server_test.go @@ -0,0 +1,129 @@ +package acceptance_test + +import ( + "encoding/json" + "net" + "net/http" + "net/http/httptest" + "testing" + + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/databricks/databricks-sdk-go/service/workspace" +) + +type TestServer struct { + *httptest.Server + Mux *http.ServeMux + Port int +} + +type HandlerFunc func(r *http.Request) (any, error) + +func NewTestServer() *TestServer { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + port := server.Listener.Addr().(*net.TCPAddr).Port + + return &TestServer{ + Server: server, + Mux: mux, + Port: port, + } +} + +func (s *TestServer) Handle(pattern string, handler HandlerFunc) { + s.Mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) { + resp, err := handler(r) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + + var respBytes []byte + + respString, ok := resp.(string) + if ok { + respBytes = []byte(respString) + } else { + respBytes, err = json.MarshalIndent(resp, "", " ") + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } + + if _, err := w.Write(respBytes); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + }) +} + +func StartServer(t *testing.T) *TestServer { + server := NewTestServer() + t.Cleanup(func() { + server.Close() + }) + return server +} + +func AddHandlers(server *TestServer) { + server.Handle("/api/2.0/policies/clusters/list", func(r *http.Request) (any, error) { + return compute.ListPoliciesResponse{ + Policies: []compute.Policy{ + { + PolicyId: "5678", + Name: "wrong-cluster-policy", + }, + { + PolicyId: "9876", + Name: "some-test-cluster-policy", + }, + }, + }, nil + }) + + server.Handle("/api/2.0/instance-pools/list", func(r *http.Request) (any, error) { + return compute.ListInstancePools{ + InstancePools: []compute.InstancePoolAndStats{ + { + InstancePoolName: "some-test-instance-pool", + InstancePoolId: "1234", + }, + }, + }, nil + }) + + server.Handle("/api/2.1/clusters/list", func(r *http.Request) (any, error) { + return compute.ListClustersResponse{ + Clusters: []compute.ClusterDetails{ + { + ClusterName: "some-test-cluster", + ClusterId: "4321", + }, + { + ClusterName: "some-other-cluster", + ClusterId: "9876", + }, + }, + }, nil + }) + + server.Handle("/api/2.0/preview/scim/v2/Me", func(r *http.Request) (any, error) { + return iam.User{ + UserName: "tester@databricks.com", + }, nil + }) + + server.Handle("/api/2.0/workspace/get-status", func(r *http.Request) (any, error) { + return workspace.ObjectInfo{ + ObjectId: 1001, + ObjectType: "DIRECTORY", + Path: "", + ResourceId: "1001", + }, nil + }) +} diff --git a/bundle/artifacts/expand_globs_test.go b/bundle/artifacts/expand_globs_test.go index dc7c77de7..264c52c50 100644 --- a/bundle/artifacts/expand_globs_test.go +++ b/bundle/artifacts/expand_globs_test.go @@ -2,7 +2,6 @@ package artifacts import ( "context" - "fmt" "path/filepath" "testing" @@ -88,16 +87,16 @@ func TestExpandGlobs_InvalidPattern(t *testing.T) { )) assert.Len(t, diags, 4) - assert.Equal(t, fmt.Sprintf("%s: syntax error in pattern", filepath.Clean("a[.txt")), diags[0].Summary) + assert.Equal(t, filepath.Clean("a[.txt")+": syntax 
error in pattern", diags[0].Summary) assert.Equal(t, filepath.Join(tmpDir, "databricks.yml"), diags[0].Locations[0].File) assert.Equal(t, "artifacts.test.files[0].source", diags[0].Paths[0].String()) - assert.Equal(t, fmt.Sprintf("%s: syntax error in pattern", filepath.Clean("a[.txt")), diags[1].Summary) + assert.Equal(t, filepath.Clean("a[.txt")+": syntax error in pattern", diags[1].Summary) assert.Equal(t, filepath.Join(tmpDir, "databricks.yml"), diags[1].Locations[0].File) assert.Equal(t, "artifacts.test.files[1].source", diags[1].Paths[0].String()) - assert.Equal(t, fmt.Sprintf("%s: syntax error in pattern", filepath.Clean("../a[.txt")), diags[2].Summary) + assert.Equal(t, filepath.Clean("../a[.txt")+": syntax error in pattern", diags[2].Summary) assert.Equal(t, filepath.Join(tmpDir, "databricks.yml"), diags[2].Locations[0].File) assert.Equal(t, "artifacts.test.files[2].source", diags[2].Paths[0].String()) - assert.Equal(t, fmt.Sprintf("%s: syntax error in pattern", filepath.Clean("subdir/a[.txt")), diags[3].Summary) + assert.Equal(t, filepath.Clean("subdir/a[.txt")+": syntax error in pattern", diags[3].Summary) assert.Equal(t, filepath.Join(tmpDir, "databricks.yml"), diags[3].Locations[0].File) assert.Equal(t, "artifacts.test.files[3].source", diags[3].Paths[0].String()) } diff --git a/bundle/artifacts/whl/infer.go b/bundle/artifacts/whl/infer.go index 604bfc449..9c40360be 100644 --- a/bundle/artifacts/whl/infer.go +++ b/bundle/artifacts/whl/infer.go @@ -32,7 +32,7 @@ func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { //) py := python.GetExecutable() - artifact.BuildCommand = fmt.Sprintf(`%s setup.py bdist_wheel`, py) + artifact.BuildCommand = py + " setup.py bdist_wheel" return nil } diff --git a/bundle/bundle.go b/bundle/bundle.go index 573bcef2f..1f5e2a294 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -8,6 +8,7 @@ package bundle import ( "context" + "errors" "fmt" "os" "path/filepath" @@ -234,7 +235,7 @@ func (b *Bundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) { // we call into from this bundle context. func (b *Bundle) AuthEnv() (map[string]string, error) { if b.client == nil { - return nil, fmt.Errorf("workspace client not initialized yet") + return nil, errors.New("workspace client not initialized yet") } cfg := b.client.Config diff --git a/bundle/config/artifact.go b/bundle/config/artifact.go index 9a5690f57..177799e11 100644 --- a/bundle/config/artifact.go +++ b/bundle/config/artifact.go @@ -2,7 +2,7 @@ package config import ( "context" - "fmt" + "errors" "github.com/databricks/cli/libs/exec" ) @@ -37,7 +37,7 @@ type Artifact struct { func (a *Artifact) Build(ctx context.Context) ([]byte, error) { if a.BuildCommand == "" { - return nil, fmt.Errorf("no build property defined") + return nil, errors.New("no build property defined") } var e *exec.Executor diff --git a/bundle/config/experimental.go b/bundle/config/experimental.go index 4c787168f..7ecac5d7d 100644 --- a/bundle/config/experimental.go +++ b/bundle/config/experimental.go @@ -27,9 +27,33 @@ type Experimental struct { // PyDABs determines whether to load the 'databricks-pydabs' package. // // PyDABs allows to define bundle configuration using Python. + // PyDABs is deprecated use Python instead. PyDABs PyDABs `json:"pydabs,omitempty"` + + // Python configures loading of Python code defined with 'databricks-bundles' package. 
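+	// An illustrative databricks.yml sketch for this section (values are examples only,
+	// taken from the field documentation below):
+	//
+	//	experimental:
+	//	  python:
+	//	    venv_path: .venv
+	//	    resources: ["my_project.resources:load_resources"]
+	//	    mutators: ["my_project.mutators:add_default_cluster"]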
+ Python Python `json:"python,omitempty"` } +type Python struct { + // Resources contains a list of fully qualified function paths to load resources + // defined in Python code. + // + // Example: ["my_project.resources:load_resources"] + Resources []string `json:"resources"` + + // Mutators contains a list of fully qualified function paths to mutator functions. + // + // Example: ["my_project.mutators:add_default_cluster"] + Mutators []string `json:"mutators"` + + // VEnvPath is path to the virtual environment. + // + // If enabled, Python code will execute within this environment. If disabled, + // it defaults to using the Python interpreter available in the current shell. + VEnvPath string `json:"venv_path,omitempty"` +} + +// PyDABs is deprecated use Python instead type PyDABs struct { // Enabled is a flag to enable the feature. Enabled bool `json:"enabled,omitempty"` diff --git a/bundle/config/mutator/apply_presets.go b/bundle/config/mutator/apply_presets.go index 381703756..59b8547be 100644 --- a/bundle/config/mutator/apply_presets.go +++ b/bundle/config/mutator/apply_presets.go @@ -9,7 +9,6 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/libs/dbr" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/textutil" @@ -222,27 +221,6 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos dashboard.DisplayName = prefix + dashboard.DisplayName } - if config.IsExplicitlyEnabled((b.Config.Presets.SourceLinkedDeployment)) { - isDatabricksWorkspace := dbr.RunsOnRuntime(ctx) && strings.HasPrefix(b.SyncRootPath, "/Workspace/") - if !isDatabricksWorkspace { - target := b.Config.Bundle.Target - path := dyn.NewPath(dyn.Key("targets"), dyn.Key(target), dyn.Key("presets"), dyn.Key("source_linked_deployment")) - diags = diags.Append( - diag.Diagnostic{ - Severity: diag.Warning, - Summary: "source-linked deployment is available only in the Databricks Workspace", - Paths: []dyn.Path{ - path, - }, - Locations: b.Config.GetLocations(path[2:].String()), - }, - ) - - disabled := false - b.Config.Presets.SourceLinkedDeployment = &disabled - } - } - return diags } diff --git a/bundle/config/mutator/apply_presets_test.go b/bundle/config/mutator/apply_presets_test.go index c26f20383..5e3f942cc 100644 --- a/bundle/config/mutator/apply_presets_test.go +++ b/bundle/config/mutator/apply_presets_test.go @@ -2,16 +2,12 @@ package mutator_test import ( "context" - "runtime" "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/config/resources" - "github.com/databricks/cli/bundle/internal/bundletest" - "github.com/databricks/cli/libs/dbr" - "github.com/databricks/cli/libs/dyn" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/require" @@ -398,87 +394,3 @@ func TestApplyPresetsResourceNotDefined(t *testing.T) { }) } } - -func TestApplyPresetsSourceLinkedDeployment(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("this test is not applicable on Windows because source-linked mode works only in the Databricks Workspace") - } - - testContext := context.Background() - enabled := true - disabled := false - workspacePath := "/Workspace/user.name@company.com" - - tests := []struct { - bundlePath string - ctx context.Context - name string - initialValue *bool - 
expectedValue *bool - expectedWarning string - }{ - { - name: "preset enabled, bundle in Workspace, databricks runtime", - bundlePath: workspacePath, - ctx: dbr.MockRuntime(testContext, true), - initialValue: &enabled, - expectedValue: &enabled, - }, - { - name: "preset enabled, bundle not in Workspace, databricks runtime", - bundlePath: "/Users/user.name@company.com", - ctx: dbr.MockRuntime(testContext, true), - initialValue: &enabled, - expectedValue: &disabled, - expectedWarning: "source-linked deployment is available only in the Databricks Workspace", - }, - { - name: "preset enabled, bundle in Workspace, not databricks runtime", - bundlePath: workspacePath, - ctx: dbr.MockRuntime(testContext, false), - initialValue: &enabled, - expectedValue: &disabled, - expectedWarning: "source-linked deployment is available only in the Databricks Workspace", - }, - { - name: "preset disabled, bundle in Workspace, databricks runtime", - bundlePath: workspacePath, - ctx: dbr.MockRuntime(testContext, true), - initialValue: &disabled, - expectedValue: &disabled, - }, - { - name: "preset nil, bundle in Workspace, databricks runtime", - bundlePath: workspacePath, - ctx: dbr.MockRuntime(testContext, true), - initialValue: nil, - expectedValue: nil, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - b := &bundle.Bundle{ - SyncRootPath: tt.bundlePath, - Config: config.Root{ - Presets: config.Presets{ - SourceLinkedDeployment: tt.initialValue, - }, - }, - } - - bundletest.SetLocation(b, "presets.source_linked_deployment", []dyn.Location{{File: "databricks.yml"}}) - diags := bundle.Apply(tt.ctx, b, mutator.ApplyPresets()) - if diags.HasError() { - t.Fatalf("unexpected error: %v", diags) - } - - if tt.expectedWarning != "" { - require.Equal(t, tt.expectedWarning, diags[0].Summary) - require.NotEmpty(t, diags[0].Locations) - } - - require.Equal(t, tt.expectedValue, b.Config.Presets.SourceLinkedDeployment) - }) - } -} diff --git a/bundle/config/mutator/apply_source_linked_deployment_preset.go b/bundle/config/mutator/apply_source_linked_deployment_preset.go new file mode 100644 index 000000000..78ccc5322 --- /dev/null +++ b/bundle/config/mutator/apply_source_linked_deployment_preset.go @@ -0,0 +1,75 @@ +package mutator + +import ( + "context" + "strings" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/dbr" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" +) + +type applySourceLinkedDeploymentPreset struct{} + +// Apply source-linked deployment preset +func ApplySourceLinkedDeploymentPreset() *applySourceLinkedDeploymentPreset { + return &applySourceLinkedDeploymentPreset{} +} + +func (m *applySourceLinkedDeploymentPreset) Name() string { + return "ApplySourceLinkedDeploymentPreset" +} + +func (m *applySourceLinkedDeploymentPreset) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + if config.IsExplicitlyDisabled(b.Config.Presets.SourceLinkedDeployment) { + return nil + } + + var diags diag.Diagnostics + isDatabricksWorkspace := dbr.RunsOnRuntime(ctx) && strings.HasPrefix(b.SyncRootPath, "/Workspace/") + target := b.Config.Bundle.Target + + if config.IsExplicitlyEnabled((b.Config.Presets.SourceLinkedDeployment)) { + if !isDatabricksWorkspace { + path := dyn.NewPath(dyn.Key("targets"), dyn.Key(target), dyn.Key("presets"), dyn.Key("source_linked_deployment")) + diags = diags.Append( + diag.Diagnostic{ + Severity: diag.Warning, + Summary: "source-linked deployment is available 
only in the Databricks Workspace", + Paths: []dyn.Path{ + path, + }, + Locations: b.Config.GetLocations(path[2:].String()), + }, + ) + + disabled := false + b.Config.Presets.SourceLinkedDeployment = &disabled + return diags + } + } + + if isDatabricksWorkspace && b.Config.Bundle.Mode == config.Development { + enabled := true + b.Config.Presets.SourceLinkedDeployment = &enabled + } + + if b.Config.Workspace.FilePath != "" && config.IsExplicitlyEnabled(b.Config.Presets.SourceLinkedDeployment) { + path := dyn.NewPath(dyn.Key("targets"), dyn.Key(target), dyn.Key("workspace"), dyn.Key("file_path")) + + diags = diags.Append( + diag.Diagnostic{ + Severity: diag.Warning, + Summary: "workspace.file_path setting will be ignored in source-linked deployment mode", + Paths: []dyn.Path{ + path[2:], + }, + Locations: b.Config.GetLocations(path[2:].String()), + }, + ) + } + + return diags +} diff --git a/bundle/config/mutator/apply_source_linked_deployment_preset_test.go b/bundle/config/mutator/apply_source_linked_deployment_preset_test.go new file mode 100644 index 000000000..1b74fd8e9 --- /dev/null +++ b/bundle/config/mutator/apply_source_linked_deployment_preset_test.go @@ -0,0 +1,122 @@ +package mutator_test + +import ( + "context" + "runtime" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/internal/bundletest" + "github.com/databricks/cli/libs/dbr" + "github.com/databricks/cli/libs/dyn" + "github.com/stretchr/testify/require" +) + +func TestApplyPresetsSourceLinkedDeployment(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("this test is not applicable on Windows because source-linked mode works only in the Databricks Workspace") + } + + testContext := context.Background() + enabled := true + disabled := false + workspacePath := "/Workspace/user.name@company.com" + + tests := []struct { + name string + ctx context.Context + mutateBundle func(b *bundle.Bundle) + initialValue *bool + expectedValue *bool + expectedWarning string + }{ + { + name: "preset enabled, bundle in Workspace, databricks runtime", + ctx: dbr.MockRuntime(testContext, true), + initialValue: &enabled, + expectedValue: &enabled, + }, + { + name: "preset enabled, bundle not in Workspace, databricks runtime", + ctx: dbr.MockRuntime(testContext, true), + mutateBundle: func(b *bundle.Bundle) { + b.SyncRootPath = "/Users/user.name@company.com" + }, + initialValue: &enabled, + expectedValue: &disabled, + expectedWarning: "source-linked deployment is available only in the Databricks Workspace", + }, + { + name: "preset enabled, bundle in Workspace, not databricks runtime", + ctx: dbr.MockRuntime(testContext, false), + initialValue: &enabled, + expectedValue: &disabled, + expectedWarning: "source-linked deployment is available only in the Databricks Workspace", + }, + { + name: "preset disabled, bundle in Workspace, databricks runtime", + ctx: dbr.MockRuntime(testContext, true), + initialValue: &disabled, + expectedValue: &disabled, + }, + { + name: "preset nil, bundle in Workspace, databricks runtime", + ctx: dbr.MockRuntime(testContext, true), + initialValue: nil, + expectedValue: nil, + }, + { + name: "preset nil, dev mode true, bundle in Workspace, databricks runtime", + ctx: dbr.MockRuntime(testContext, true), + mutateBundle: func(b *bundle.Bundle) { + b.Config.Bundle.Mode = config.Development + }, + initialValue: nil, + expectedValue: &enabled, + }, + { + name: "preset enabled, 
workspace.file_path is defined by user", + ctx: dbr.MockRuntime(testContext, true), + mutateBundle: func(b *bundle.Bundle) { + b.Config.Workspace.FilePath = "file_path" + }, + initialValue: &enabled, + expectedValue: &enabled, + expectedWarning: "workspace.file_path setting will be ignored in source-linked deployment mode", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b := &bundle.Bundle{ + SyncRootPath: workspacePath, + Config: config.Root{ + Presets: config.Presets{ + SourceLinkedDeployment: tt.initialValue, + }, + }, + } + + if tt.mutateBundle != nil { + tt.mutateBundle(b) + } + + bundletest.SetLocation(b, "presets.source_linked_deployment", []dyn.Location{{File: "databricks.yml"}}) + bundletest.SetLocation(b, "workspace.file_path", []dyn.Location{{File: "databricks.yml"}}) + + diags := bundle.Apply(tt.ctx, b, mutator.ApplySourceLinkedDeploymentPreset()) + if diags.HasError() { + t.Fatalf("unexpected error: %v", diags) + } + + if tt.expectedWarning != "" { + require.Equal(t, tt.expectedWarning, diags[0].Summary) + require.NotEmpty(t, diags[0].Locations) + } + + require.Equal(t, tt.expectedValue, b.Config.Presets.SourceLinkedDeployment) + }) + } +} diff --git a/bundle/config/mutator/expand_workspace_root.go b/bundle/config/mutator/expand_workspace_root.go index a29d129b0..2ec70548f 100644 --- a/bundle/config/mutator/expand_workspace_root.go +++ b/bundle/config/mutator/expand_workspace_root.go @@ -2,7 +2,6 @@ package mutator import ( "context" - "fmt" "path" "strings" @@ -33,7 +32,7 @@ func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) diag. } if strings.HasPrefix(root, "~/") { - home := fmt.Sprintf("/Workspace/Users/%s", currentUser.UserName) + home := "/Workspace/Users/" + currentUser.UserName b.Config.Workspace.RootPath = path.Join(home, root[2:]) } diff --git a/bundle/config/mutator/prepend_workspace_prefix.go b/bundle/config/mutator/prepend_workspace_prefix.go index b093ec26a..616759ee4 100644 --- a/bundle/config/mutator/prepend_workspace_prefix.go +++ b/bundle/config/mutator/prepend_workspace_prefix.go @@ -55,7 +55,7 @@ func (m *prependWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) di } } - return dyn.NewValue(fmt.Sprintf("/Workspace%s", path), v.Locations()), nil + return dyn.NewValue("/Workspace"+path, v.Locations()), nil }) if err != nil { return dyn.InvalidValue, err diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go index df0136fad..44b53681d 100644 --- a/bundle/config/mutator/process_target_mode.go +++ b/bundle/config/mutator/process_target_mode.go @@ -6,7 +6,6 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/libs/dbr" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/iamutil" @@ -58,14 +57,6 @@ func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) { t.TriggerPauseStatus = config.Paused } - if !config.IsExplicitlyDisabled(t.SourceLinkedDeployment) { - isInWorkspace := strings.HasPrefix(b.SyncRootPath, "/Workspace/") - if isInWorkspace && dbr.RunsOnRuntime(ctx) { - enabled := true - t.SourceLinkedDeployment = &enabled - } - } - if !config.IsExplicitlyDisabled(t.PipelinesDevelopment) { enabled := true t.PipelinesDevelopment = &enabled diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index c299a7636..097c092a6 100644 --- 
a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -3,14 +3,12 @@ package mutator import ( "context" "reflect" - "runtime" "slices" "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" - "github.com/databricks/cli/libs/dbr" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/tags" "github.com/databricks/cli/libs/vfs" @@ -540,32 +538,3 @@ func TestPipelinesDevelopmentDisabled(t *testing.T) { assert.False(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) } - -func TestSourceLinkedDeploymentEnabled(t *testing.T) { - b, diags := processSourceLinkedBundle(t, true) - require.NoError(t, diags.Error()) - assert.True(t, *b.Config.Presets.SourceLinkedDeployment) -} - -func TestSourceLinkedDeploymentDisabled(t *testing.T) { - b, diags := processSourceLinkedBundle(t, false) - require.NoError(t, diags.Error()) - assert.False(t, *b.Config.Presets.SourceLinkedDeployment) -} - -func processSourceLinkedBundle(t *testing.T, presetEnabled bool) (*bundle.Bundle, diag.Diagnostics) { - if runtime.GOOS == "windows" { - t.Skip("this test is not applicable on Windows because source-linked mode works only in the Databricks Workspace") - } - - b := mockBundle(config.Development) - - workspacePath := "/Workspace/lennart@company.com/" - b.SyncRootPath = workspacePath - b.Config.Presets.SourceLinkedDeployment = &presetEnabled - - ctx := dbr.MockRuntime(context.Background(), true) - m := bundle.Seq(ProcessTargetMode(), ApplyPresets()) - diags := bundle.Apply(ctx, b, m) - return b, diags -} diff --git a/bundle/config/mutator/python/python_mutator.go b/bundle/config/mutator/python/python_mutator.go index 69c1a5dd6..8009ab243 100644 --- a/bundle/config/mutator/python/python_mutator.go +++ b/bundle/config/mutator/python/python_mutator.go @@ -9,6 +9,7 @@ import ( "io" "os" "path/filepath" + "reflect" "strings" "github.com/databricks/databricks-sdk-go/logger" @@ -40,6 +41,8 @@ const ( // We also open for possibility of appending other sections of bundle configuration, // for example, adding new variables. However, this is not supported yet, and CLI rejects // such changes. + // + // Deprecated, left for backward-compatibility with PyDABs. PythonMutatorPhaseLoad phase = "load" // PythonMutatorPhaseInit is the phase after bundle configuration was loaded, and @@ -59,7 +62,46 @@ const ( // PyDABs can output YAML containing references to variables, and CLI should resolve them. // // Existing resources can't be removed, and CLI rejects such changes. + // + // Deprecated, left for backward-compatibility with PyDABs. PythonMutatorPhaseInit phase = "init" + + // PythonMutatorPhaseLoadResources is the phase in which YAML configuration was loaded. + // + // At this stage, we execute Python code to load resources defined in Python. + // + // During this process, Python code can access: + // - selected deployment target + // - bundle variable values + // - variables provided through CLI argument or environment variables + // + // The following is not available: + // - variables referencing other variables are in unresolved format + // + // Python code can output YAML referencing variables, and CLI should resolve them. + // + // Existing resources can't be removed or modified, and CLI rejects such changes. + // While it's called 'load_resources', this phase is executed in 'init' phase of mutator pipeline. 
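+	// In this phase the CLI is expected to invoke, roughly,
+	// `<python> -m databricks.bundles.build --phase load_resources`
+	// (see python_mutator_test.go for the exact arguments used).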
+ PythonMutatorPhaseLoadResources phase = "load_resources" + + // PythonMutatorPhaseApplyMutators is the phase in which resources defined in YAML or Python + // are already loaded. + // + // At this stage, we execute Python code to mutate resources defined in YAML or Python. + // + // During this process, Python code can access: + // - selected deployment target + // - bundle variable values + // - variables provided through CLI argument or environment variables + // + // The following is not available: + // - variables referencing other variables are in unresolved format + // + // Python code can output YAML referencing variables, and CLI should resolve them. + // + // Resources can't be added or removed, and CLI rejects such changes. Python code is + // allowed to modify existing resources, but not other parts of bundle configuration. + PythonMutatorPhaseApplyMutators phase = "apply_mutators" ) type pythonMutator struct { @@ -76,18 +118,64 @@ func (m *pythonMutator) Name() string { return fmt.Sprintf("PythonMutator(%s)", m.phase) } -func getExperimental(b *bundle.Bundle) config.Experimental { - if b.Config.Experimental == nil { - return config.Experimental{} +// opts is a common structure for deprecated PyDABs and upcoming Python +// configuration sections +type opts struct { + enabled bool + + venvPath string +} + +// getOpts adapts deprecated PyDABs and upcoming Python configuration +// into a common structure. +func getOpts(b *bundle.Bundle, phase phase) (opts, error) { + experimental := b.Config.Experimental + if experimental == nil { + return opts{}, nil } - return *b.Config.Experimental + // using reflect.DeepEquals in case we add more fields + pydabsEnabled := !reflect.DeepEqual(experimental.PyDABs, config.PyDABs{}) + pythonEnabled := !reflect.DeepEqual(experimental.Python, config.Python{}) + + if pydabsEnabled && pythonEnabled { + return opts{}, errors.New("both experimental/pydabs and experimental/python are enabled, only one can be enabled") + } else if pydabsEnabled { + if !experimental.PyDABs.Enabled { + return opts{}, nil + } + + // don't execute for phases for 'python' section + if phase == PythonMutatorPhaseInit || phase == PythonMutatorPhaseLoad { + return opts{ + enabled: true, + venvPath: experimental.PyDABs.VEnvPath, + }, nil + } else { + return opts{}, nil + } + } else if pythonEnabled { + // don't execute for phases for 'pydabs' section + if phase == PythonMutatorPhaseLoadResources || phase == PythonMutatorPhaseApplyMutators { + return opts{ + enabled: true, + venvPath: experimental.Python.VEnvPath, + }, nil + } else { + return opts{}, nil + } + } else { + return opts{}, nil + } } func (m *pythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - experimental := getExperimental(b) + opts, err := getOpts(b, m.phase) + if err != nil { + return diag.Errorf("failed to apply python mutator: %s", err) + } - if !experimental.PyDABs.Enabled { + if !opts.enabled { return nil } @@ -95,8 +183,8 @@ func (m *pythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno var mutateDiags diag.Diagnostics mutateDiagsHasError := errors.New("unexpected error") - err := b.Config.Mutate(func(leftRoot dyn.Value) (dyn.Value, error) { - pythonPath, err := detectExecutable(ctx, experimental.PyDABs.VEnvPath) + err = b.Config.Mutate(func(leftRoot dyn.Value) (dyn.Value, error) { + pythonPath, err := detectExecutable(ctx, opts.venvPath) if err != nil { return dyn.InvalidValue, fmt.Errorf("failed to get Python interpreter path: %w", err) } @@ -137,7 +225,7 @@ func 
createCacheDir(ctx context.Context) (string, error) { // support the same env variable as in b.CacheDir if tempDir, exists := env.TempDir(ctx); exists { // use 'default' as target name - cacheDir := filepath.Join(tempDir, "default", "pydabs") + cacheDir := filepath.Join(tempDir, "default", "python") err := os.MkdirAll(cacheDir, 0o700) if err != nil { @@ -147,7 +235,7 @@ func createCacheDir(ctx context.Context) (string, error) { return cacheDir, nil } - return os.MkdirTemp("", "-pydabs") + return os.MkdirTemp("", "-python") } func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir, rootPath, pythonPath string, root dyn.Value) (dyn.Value, diag.Diagnostics) { @@ -203,7 +291,7 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir, rootPath } // process can fail without reporting errors in diagnostics file or creating it, for instance, - // venv doesn't have PyDABs library installed + // venv doesn't have 'databricks-bundles' library installed if processErr != nil { diagnostic := diag.Diagnostic{ Severity: diag.Error, @@ -226,16 +314,15 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir, rootPath return output, pythonDiagnostics } -const installExplanation = `If using Python wheels, ensure that 'databricks-pydabs' is included in the dependencies, -and that the wheel is installed in the Python environment: +const pythonInstallExplanation = `Ensure that 'databricks-bundles' is installed in Python environment: - $ .venv/bin/pip install -e . + $ .venv/bin/pip install databricks-bundles If using a virtual environment, ensure it is specified as the venv_path property in databricks.yml, or activate the environment before running CLI commands: experimental: - pydabs: + python: venv_path: .venv ` @@ -245,9 +332,9 @@ or activate the environment before running CLI commands: func explainProcessErr(stderr string) string { // implemented in cpython/Lib/runpy.py and portable across Python 3.x, including pypy if strings.Contains(stderr, "Error while finding module specification for 'databricks.bundles.build'") { - summary := color.CyanString("Explanation: ") + "'databricks-pydabs' library is not installed in the Python environment.\n" + summary := color.CyanString("Explanation: ") + "'databricks-bundles' library is not installed in the Python environment.\n" - return stderr + "\n" + summary + "\n" + installExplanation + return stderr + "\n" + summary + "\n" + pythonInstallExplanation } return stderr @@ -277,10 +364,10 @@ func loadOutputFile(rootPath, outputPath string) (dyn.Value, diag.Diagnostics) { // // virtualPath has to stay in rootPath, because locations outside root path are not allowed: // - // Error: path /var/folders/.../pydabs/dist/*.whl is not contained in bundle root path + // Error: path /var/folders/.../python/dist/*.whl is not contained in bundle root path // // for that, we pass virtualPath instead of outputPath as file location - virtualPath, err := filepath.Abs(filepath.Join(rootPath, "__generated_by_pydabs__.yml")) + virtualPath, err := filepath.Abs(filepath.Join(rootPath, "__generated_by_python__.yml")) if err != nil { return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to get absolute path: %w", err)) } @@ -334,19 +421,23 @@ func loadDiagnosticsFile(path string) (diag.Diagnostics, error) { func createOverrideVisitor(ctx context.Context, phase phase) (merge.OverrideVisitor, error) { switch phase { case PythonMutatorPhaseLoad: - return createLoadOverrideVisitor(ctx), nil + return createLoadResourcesOverrideVisitor(ctx), nil case 
PythonMutatorPhaseInit: - return createInitOverrideVisitor(ctx), nil + return createInitOverrideVisitor(ctx, insertResourceModeAllow), nil + case PythonMutatorPhaseLoadResources: + return createLoadResourcesOverrideVisitor(ctx), nil + case PythonMutatorPhaseApplyMutators: + return createInitOverrideVisitor(ctx, insertResourceModeDisallow), nil default: return merge.OverrideVisitor{}, fmt.Errorf("unknown phase: %s", phase) } } -// createLoadOverrideVisitor creates an override visitor for the load phase. +// createLoadResourcesOverrideVisitor creates an override visitor for the load_resources phase. // -// During load, it's only possible to create new resources, and not modify or +// During load_resources, it's only possible to create new resources, and not modify or // delete existing ones. -func createLoadOverrideVisitor(ctx context.Context) merge.OverrideVisitor { +func createLoadResourcesOverrideVisitor(ctx context.Context) merge.OverrideVisitor { resourcesPath := dyn.NewPath(dyn.Key("resources")) jobsPath := dyn.NewPath(dyn.Key("resources"), dyn.Key("jobs")) @@ -385,11 +476,21 @@ func createLoadOverrideVisitor(ctx context.Context) merge.OverrideVisitor { } } +// insertResourceMode controls whether createInitOverrideVisitor allows or disallows inserting new resources. +type insertResourceMode int + +const ( + insertResourceModeDisallow insertResourceMode = iota + insertResourceModeAllow insertResourceMode = iota +) + // createInitOverrideVisitor creates an override visitor for the init phase. // // During the init phase it's possible to create new resources, modify existing // resources, but not delete existing resources. -func createInitOverrideVisitor(ctx context.Context) merge.OverrideVisitor { +// +// If mode is insertResourceModeDisallow, it matching expected behaviour of apply_mutators +func createInitOverrideVisitor(ctx context.Context, mode insertResourceMode) merge.OverrideVisitor { resourcesPath := dyn.NewPath(dyn.Key("resources")) jobsPath := dyn.NewPath(dyn.Key("resources"), dyn.Key("jobs")) @@ -424,6 +525,11 @@ func createInitOverrideVisitor(ctx context.Context) merge.OverrideVisitor { return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (insert)", valuePath.String()) } + insertResource := len(valuePath) == len(jobsPath)+1 + if mode == insertResourceModeDisallow && insertResource { + return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (insert)", valuePath.String()) + } + log.Debugf(ctx, "Insert value at %q", valuePath.String()) return right, nil @@ -441,9 +547,9 @@ func createInitOverrideVisitor(ctx context.Context) merge.OverrideVisitor { } func isOmitemptyDelete(left dyn.Value) bool { - // PyDABs can omit empty sequences/mappings in output, because we don't track them as optional, + // Python output can omit empty sequences/mappings, because we don't track them as optional, // there is no semantic difference between empty and missing, so we keep them as they were before - // PyDABs deleted them. + // Python mutator deleted them. 
switch left.Kind() { case dyn.KindMap: diff --git a/bundle/config/mutator/python/python_mutator_test.go b/bundle/config/mutator/python/python_mutator_test.go index 8bdf91d03..d51572c8a 100644 --- a/bundle/config/mutator/python/python_mutator_test.go +++ b/bundle/config/mutator/python/python_mutator_test.go @@ -2,6 +2,7 @@ package python import ( "context" + "errors" "fmt" "os" "os/exec" @@ -39,13 +40,25 @@ func TestPythonMutator_Name_init(t *testing.T) { assert.Equal(t, "PythonMutator(init)", mutator.Name()) } -func TestPythonMutator_load(t *testing.T) { +func TestPythonMutator_Name_loadResources(t *testing.T) { + mutator := PythonMutator(PythonMutatorPhaseLoadResources) + + assert.Equal(t, "PythonMutator(load_resources)", mutator.Name()) +} + +func TestPythonMutator_Name_applyMutators(t *testing.T) { + mutator := PythonMutator(PythonMutatorPhaseApplyMutators) + + assert.Equal(t, "PythonMutator(apply_mutators)", mutator.Name()) +} + +func TestPythonMutator_loadResources(t *testing.T) { withFakeVEnv(t, ".venv") b := loadYaml("databricks.yml", ` experimental: - pydabs: - enabled: true + python: + resources: ["resources:load_resources"] venv_path: .venv resources: jobs: @@ -59,12 +72,12 @@ func TestPythonMutator_load(t *testing.T) { "-m", "databricks.bundles.build", "--phase", - "load", + "load_resources", }, `{ "experimental": { - "pydabs": { - "enabled": true, + "python": { + "resources": ["resources:load_resources"], "venv_path": ".venv" } }, @@ -82,7 +95,7 @@ func TestPythonMutator_load(t *testing.T) { `{"severity": "warning", "summary": "job doesn't have any tasks", "location": {"file": "src/examples/file.py", "line": 10, "column": 5}}`, ) - mutator := PythonMutator(PythonMutatorPhaseLoad) + mutator := PythonMutator(PythonMutatorPhaseLoadResources) diags := bundle.Apply(ctx, b, mutator) assert.NoError(t, diags.Error()) @@ -108,13 +121,12 @@ func TestPythonMutator_load(t *testing.T) { }, diags[0].Locations) } -func TestPythonMutator_load_disallowed(t *testing.T) { +func TestPythonMutator_loadResources_disallowed(t *testing.T) { withFakeVEnv(t, ".venv") - b := loadYaml("databricks.yml", ` experimental: - pydabs: - enabled: true + python: + resources: ["resources:load_resources"] venv_path: .venv resources: jobs: @@ -128,12 +140,12 @@ func TestPythonMutator_load_disallowed(t *testing.T) { "-m", "databricks.bundles.build", "--phase", - "load", + "load_resources", }, `{ "experimental": { - "pydabs": { - "enabled": true, + "python": { + "resources": ["resources:load_resources"], "venv_path": ".venv" } }, @@ -147,20 +159,20 @@ func TestPythonMutator_load_disallowed(t *testing.T) { } }`, "") - mutator := PythonMutator(PythonMutatorPhaseLoad) + mutator := PythonMutator(PythonMutatorPhaseLoadResources) diag := bundle.Apply(ctx, b, mutator) assert.EqualError(t, diag.Error(), "unexpected change at \"resources.jobs.job0.description\" (insert)") } -func TestPythonMutator_init(t *testing.T) { +func TestPythonMutator_applyMutators(t *testing.T) { withFakeVEnv(t, ".venv") - b := loadYaml("databricks.yml", ` experimental: - pydabs: - enabled: true + python: venv_path: .venv + mutators: + - "mutators:add_description" resources: jobs: job0: @@ -173,13 +185,13 @@ func TestPythonMutator_init(t *testing.T) { "-m", "databricks.bundles.build", "--phase", - "init", + "apply_mutators", }, `{ "experimental": { - "pydabs": { - "enabled": true, - "venv_path": ".venv" + "python": { + "venv_path": ".venv", + "mutators": ["mutators:add_description"] } }, "resources": { @@ -192,7 +204,7 @@ func TestPythonMutator_init(t 
*testing.T) { } }`, "") - mutator := PythonMutator(PythonMutatorPhaseInit) + mutator := PythonMutator(PythonMutatorPhaseApplyMutators) diag := bundle.Apply(ctx, b, mutator) assert.NoError(t, diag.Error()) @@ -207,12 +219,12 @@ func TestPythonMutator_init(t *testing.T) { require.NoError(t, err) assert.Equal(t, "databricks.yml", name.Location().File) - // 'description' was updated by PyDABs and has location of generated file until + // 'description' was updated by Python code and has location of generated file until // we implement source maps description, err := dyn.GetByPath(v, dyn.MustPathFromString("resources.jobs.job0.description")) require.NoError(t, err) - expectedVirtualPath, err := filepath.Abs("__generated_by_pydabs__.yml") + expectedVirtualPath, err := filepath.Abs("__generated_by_python__.yml") require.NoError(t, err) assert.Equal(t, expectedVirtualPath, description.Location().File) @@ -223,12 +235,12 @@ func TestPythonMutator_init(t *testing.T) { func TestPythonMutator_badOutput(t *testing.T) { withFakeVEnv(t, ".venv") - b := loadYaml("databricks.yml", ` experimental: - pydabs: - enabled: true + python: venv_path: .venv + resources: + - "resources:load_resources" resources: jobs: job0: @@ -241,7 +253,7 @@ func TestPythonMutator_badOutput(t *testing.T) { "-m", "databricks.bundles.build", "--phase", - "load", + "load_resources", }, `{ "resources": { @@ -253,7 +265,7 @@ func TestPythonMutator_badOutput(t *testing.T) { } }`, "") - mutator := PythonMutator(PythonMutatorPhaseLoad) + mutator := PythonMutator(PythonMutatorPhaseLoadResources) diag := bundle.Apply(ctx, b, mutator) assert.EqualError(t, diag.Error(), "unknown field: unknown_property") @@ -269,34 +281,63 @@ func TestPythonMutator_disabled(t *testing.T) { assert.NoError(t, diag.Error()) } -func TestPythonMutator_venvRequired(t *testing.T) { - b := loadYaml("databricks.yml", ` - experimental: - pydabs: - enabled: true`) - - ctx := context.Background() - mutator := PythonMutator(PythonMutatorPhaseLoad) - diag := bundle.Apply(ctx, b, mutator) - - assert.Error(t, diag.Error(), "\"experimental.enable_pydabs\" is enabled, but \"experimental.venv.path\" is not set") -} - func TestPythonMutator_venvNotFound(t *testing.T) { expectedError := fmt.Sprintf("failed to get Python interpreter path: can't find %q, check if virtualenv is created", interpreterPath("bad_path")) b := loadYaml("databricks.yml", ` experimental: - pydabs: - enabled: true - venv_path: bad_path`) + python: + venv_path: bad_path + resources: + - "resources:load_resources"`) - mutator := PythonMutator(PythonMutatorPhaseInit) + mutator := PythonMutator(PythonMutatorPhaseLoadResources) diag := bundle.Apply(context.Background(), b, mutator) assert.EqualError(t, diag.Error(), expectedError) } +func TestGetOps_Python(t *testing.T) { + actual, err := getOpts(&bundle.Bundle{ + Config: config.Root{ + Experimental: &config.Experimental{ + Python: config.Python{ + VEnvPath: ".venv", + Resources: []string{ + "resources:load_resources", + }, + }, + }, + }, + }, PythonMutatorPhaseLoadResources) + + assert.NoError(t, err) + assert.Equal(t, opts{venvPath: ".venv", enabled: true}, actual) +} + +func TestGetOps_PyDABs(t *testing.T) { + actual, err := getOpts(&bundle.Bundle{ + Config: config.Root{ + Experimental: &config.Experimental{ + PyDABs: config.PyDABs{ + VEnvPath: ".venv", + Enabled: true, + }, + }, + }, + }, PythonMutatorPhaseInit) + + assert.NoError(t, err) + assert.Equal(t, opts{venvPath: ".venv", enabled: true}, actual) +} + +func TestGetOps_empty(t *testing.T) { + actual, err 
:= getOpts(&bundle.Bundle{}, PythonMutatorPhaseLoadResources) + + assert.NoError(t, err) + assert.Equal(t, opts{enabled: false}, actual) +} + type createOverrideVisitorTestCase struct { name string updatePath dyn.Path @@ -314,48 +355,48 @@ func TestCreateOverrideVisitor(t *testing.T) { testCases := []createOverrideVisitorTestCase{ { - name: "load: can't change an existing job", - phase: PythonMutatorPhaseLoad, + name: "load_resources: can't change an existing job", + phase: PythonMutatorPhaseLoadResources, updatePath: dyn.MustPathFromString("resources.jobs.job0.name"), deletePath: dyn.MustPathFromString("resources.jobs.job0.name"), insertPath: dyn.MustPathFromString("resources.jobs.job0.name"), - deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (delete)"), - insertError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (insert)"), - updateError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (update)"), + deleteError: errors.New("unexpected change at \"resources.jobs.job0.name\" (delete)"), + insertError: errors.New("unexpected change at \"resources.jobs.job0.name\" (insert)"), + updateError: errors.New("unexpected change at \"resources.jobs.job0.name\" (update)"), }, { - name: "load: can't delete an existing job", - phase: PythonMutatorPhaseLoad, + name: "load_resources: can't delete an existing job", + phase: PythonMutatorPhaseLoadResources, deletePath: dyn.MustPathFromString("resources.jobs.job0"), - deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0\" (delete)"), + deleteError: errors.New("unexpected change at \"resources.jobs.job0\" (delete)"), }, { - name: "load: can insert 'resources'", - phase: PythonMutatorPhaseLoad, + name: "load_resources: can insert 'resources'", + phase: PythonMutatorPhaseLoadResources, insertPath: dyn.MustPathFromString("resources"), insertError: nil, }, { - name: "load: can insert 'resources.jobs'", - phase: PythonMutatorPhaseLoad, + name: "load_resources: can insert 'resources.jobs'", + phase: PythonMutatorPhaseLoadResources, insertPath: dyn.MustPathFromString("resources.jobs"), insertError: nil, }, { - name: "load: can insert a job", - phase: PythonMutatorPhaseLoad, + name: "load_resources: can insert a job", + phase: PythonMutatorPhaseLoadResources, insertPath: dyn.MustPathFromString("resources.jobs.job0"), insertError: nil, }, { - name: "load: can't change include", - phase: PythonMutatorPhaseLoad, + name: "load_resources: can't change include", + phase: PythonMutatorPhaseLoadResources, deletePath: dyn.MustPathFromString("include[0]"), insertPath: dyn.MustPathFromString("include[0]"), updatePath: dyn.MustPathFromString("include[0]"), - deleteError: fmt.Errorf("unexpected change at \"include[0]\" (delete)"), - insertError: fmt.Errorf("unexpected change at \"include[0]\" (insert)"), - updateError: fmt.Errorf("unexpected change at \"include[0]\" (update)"), + deleteError: errors.New("unexpected change at \"include[0]\" (delete)"), + insertError: errors.New("unexpected change at \"include[0]\" (insert)"), + updateError: errors.New("unexpected change at \"include[0]\" (update)"), }, { name: "init: can change an existing job", @@ -371,7 +412,7 @@ func TestCreateOverrideVisitor(t *testing.T) { name: "init: can't delete an existing job", phase: PythonMutatorPhaseInit, deletePath: dyn.MustPathFromString("resources.jobs.job0"), - deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0\" (delete)"), + deleteError: errors.New("unexpected change at \"resources.jobs.job0\" (delete)"), 
}, { name: "init: can insert 'resources'", @@ -397,9 +438,43 @@ func TestCreateOverrideVisitor(t *testing.T) { deletePath: dyn.MustPathFromString("include[0]"), insertPath: dyn.MustPathFromString("include[0]"), updatePath: dyn.MustPathFromString("include[0]"), - deleteError: fmt.Errorf("unexpected change at \"include[0]\" (delete)"), - insertError: fmt.Errorf("unexpected change at \"include[0]\" (insert)"), - updateError: fmt.Errorf("unexpected change at \"include[0]\" (update)"), + deleteError: errors.New("unexpected change at \"include[0]\" (delete)"), + insertError: errors.New("unexpected change at \"include[0]\" (insert)"), + updateError: errors.New("unexpected change at \"include[0]\" (update)"), + }, + { + name: "apply_mutators: can't delete an existing job", + phase: PythonMutatorPhaseInit, + deletePath: dyn.MustPathFromString("resources.jobs.job0"), + deleteError: errors.New("unexpected change at \"resources.jobs.job0\" (delete)"), + }, + { + name: "apply_mutators: can insert 'resources'", + phase: PythonMutatorPhaseApplyMutators, + insertPath: dyn.MustPathFromString("resources"), + insertError: nil, + }, + { + name: "apply_mutators: can insert 'resources.jobs'", + phase: PythonMutatorPhaseApplyMutators, + insertPath: dyn.MustPathFromString("resources.jobs"), + insertError: nil, + }, + { + name: "apply_mutators: can't insert a job", + phase: PythonMutatorPhaseApplyMutators, + insertPath: dyn.MustPathFromString("resources.jobs.job0"), + insertError: errors.New("unexpected change at \"resources.jobs.job0\" (insert)"), + }, + { + name: "apply_mutators: can't change include", + phase: PythonMutatorPhaseApplyMutators, + deletePath: dyn.MustPathFromString("include[0]"), + insertPath: dyn.MustPathFromString("include[0]"), + updatePath: dyn.MustPathFromString("include[0]"), + deleteError: errors.New("unexpected change at \"include[0]\" (delete)"), + insertError: errors.New("unexpected change at \"include[0]\" (insert)"), + updateError: errors.New("unexpected change at \"include[0]\" (update)"), }, } @@ -458,9 +533,9 @@ type overrideVisitorOmitemptyTestCase struct { } func TestCreateOverrideVisitor_omitempty(t *testing.T) { - // PyDABs can omit empty sequences/mappings in output, because we don't track them as optional, + // Python output can omit empty sequences/mappings in output, because we don't track them as optional, // there is no semantic difference between empty and missing, so we keep them as they were before - // PyDABs deleted them. + // Python code deleted them. allPhases := []phase{PythonMutatorPhaseLoad, PythonMutatorPhaseInit} location := dyn.Location{ @@ -567,18 +642,17 @@ func TestExplainProcessErr(t *testing.T) { stderr := "/home/test/.venv/bin/python3: Error while finding module specification for 'databricks.bundles.build' (ModuleNotFoundError: No module named 'databricks')\n" expected := `/home/test/.venv/bin/python3: Error while finding module specification for 'databricks.bundles.build' (ModuleNotFoundError: No module named 'databricks') -Explanation: 'databricks-pydabs' library is not installed in the Python environment. +Explanation: 'databricks-bundles' library is not installed in the Python environment. -If using Python wheels, ensure that 'databricks-pydabs' is included in the dependencies, -and that the wheel is installed in the Python environment: +Ensure that 'databricks-bundles' is installed in Python environment: - $ .venv/bin/pip install -e . 
+ $ .venv/bin/pip install databricks-bundles If using a virtual environment, ensure it is specified as the venv_path property in databricks.yml, or activate the environment before running CLI commands: experimental: - pydabs: + python: venv_path: .venv ` diff --git a/bundle/config/mutator/resolve_variable_references.go b/bundle/config/mutator/resolve_variable_references.go index 8c207e375..7ad3dfd8d 100644 --- a/bundle/config/mutator/resolve_variable_references.go +++ b/bundle/config/mutator/resolve_variable_references.go @@ -2,9 +2,10 @@ package mutator import ( "context" - "fmt" + "errors" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/variable" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" @@ -15,7 +16,7 @@ import ( type resolveVariableReferences struct { prefixes []string pattern dyn.Pattern - lookupFn func(dyn.Value, dyn.Path) (dyn.Value, error) + lookupFn func(dyn.Value, dyn.Path, *bundle.Bundle) (dyn.Value, error) skipFn func(dyn.Value) bool } @@ -44,16 +45,21 @@ func ResolveVariableReferencesInComplexVariables() bundle.Mutator { } } -func lookup(v dyn.Value, path dyn.Path) (dyn.Value, error) { +func lookup(v dyn.Value, path dyn.Path, b *bundle.Bundle) (dyn.Value, error) { + if config.IsExplicitlyEnabled(b.Config.Presets.SourceLinkedDeployment) { + if path.String() == "workspace.file_path" { + return dyn.V(b.SyncRootPath), nil + } + } // Future opportunity: if we lookup this path in both the given root // and the synthesized root, we know if it was explicitly set or implied to be empty. // Then we can emit a warning if it was not explicitly set. return dyn.GetByPath(v, path) } -func lookupForComplexVariables(v dyn.Value, path dyn.Path) (dyn.Value, error) { +func lookupForComplexVariables(v dyn.Value, path dyn.Path, b *bundle.Bundle) (dyn.Value, error) { if path[0].Key() != "variables" { - return lookup(v, path) + return lookup(v, path, b) } varV, err := dyn.GetByPath(v, path[:len(path)-1]) @@ -68,10 +74,10 @@ func lookupForComplexVariables(v dyn.Value, path dyn.Path) (dyn.Value, error) { } if vv.Type == variable.VariableTypeComplex { - return dyn.InvalidValue, fmt.Errorf("complex variables cannot contain references to another complex variables") + return dyn.InvalidValue, errors.New("complex variables cannot contain references to another complex variables") } - return lookup(v, path) + return lookup(v, path, b) } func skipResolvingInNonComplexVariables(v dyn.Value) bool { @@ -83,9 +89,9 @@ func skipResolvingInNonComplexVariables(v dyn.Value) bool { } } -func lookupForVariables(v dyn.Value, path dyn.Path) (dyn.Value, error) { +func lookupForVariables(v dyn.Value, path dyn.Path, b *bundle.Bundle) (dyn.Value, error) { if path[0].Key() != "variables" { - return lookup(v, path) + return lookup(v, path, b) } varV, err := dyn.GetByPath(v, path[:len(path)-1]) @@ -100,10 +106,10 @@ func lookupForVariables(v dyn.Value, path dyn.Path) (dyn.Value, error) { } if vv.Lookup != nil && vv.Lookup.String() != "" { - return dyn.InvalidValue, fmt.Errorf("lookup variables cannot contain references to another lookup variables") + return dyn.InvalidValue, errors.New("lookup variables cannot contain references to another lookup variables") } - return lookup(v, path) + return lookup(v, path, b) } func (*resolveVariableReferences) Name() string { @@ -125,6 +131,7 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) varPath := dyn.NewPath(dyn.Key("var")) var diags 
diag.Diagnostics + err := b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) { // Synthesize a copy of the root that has all fields that are present in the type // but not set in the dynamic value set to their corresponding empty value. @@ -167,7 +174,7 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) if m.skipFn != nil && m.skipFn(v) { return dyn.InvalidValue, dynvar.ErrSkipResolution } - return m.lookupFn(normalized, path) + return m.lookupFn(normalized, path, b) } } diff --git a/bundle/config/mutator/resolve_variable_references_test.go b/bundle/config/mutator/resolve_variable_references_test.go index 07972ecf4..18bb022aa 100644 --- a/bundle/config/mutator/resolve_variable_references_test.go +++ b/bundle/config/mutator/resolve_variable_references_test.go @@ -12,36 +12,11 @@ import ( "github.com/databricks/cli/libs/dyn" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/databricks/databricks-sdk-go/service/pipelines" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestResolveVariableReferences(t *testing.T) { - b := &bundle.Bundle{ - Config: config.Root{ - Bundle: config.Bundle{ - Name: "example", - }, - Workspace: config.Workspace{ - RootPath: "${bundle.name}/bar", - FilePath: "${workspace.root_path}/baz", - }, - }, - } - - // Apply with an invalid prefix. This should not change the workspace root path. - diags := bundle.Apply(context.Background(), b, ResolveVariableReferences("doesntexist")) - require.NoError(t, diags.Error()) - require.Equal(t, "${bundle.name}/bar", b.Config.Workspace.RootPath) - require.Equal(t, "${workspace.root_path}/baz", b.Config.Workspace.FilePath) - - // Apply with a valid prefix. This should change the workspace root path. - diags = bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle", "workspace")) - require.NoError(t, diags.Error()) - require.Equal(t, "example/bar", b.Config.Workspace.RootPath) - require.Equal(t, "example/bar/baz", b.Config.Workspace.FilePath) -} - func TestResolveVariableReferencesToBundleVariables(t *testing.T) { b := &bundle.Bundle{ Config: config.Root{ @@ -65,37 +40,6 @@ func TestResolveVariableReferencesToBundleVariables(t *testing.T) { require.Equal(t, "example/bar", b.Config.Workspace.RootPath) } -func TestResolveVariableReferencesToEmptyFields(t *testing.T) { - b := &bundle.Bundle{ - Config: config.Root{ - Bundle: config.Bundle{ - Name: "example", - Git: config.Git{ - Branch: "", - }, - }, - Resources: config.Resources{ - Jobs: map[string]*resources.Job{ - "job1": { - JobSettings: &jobs.JobSettings{ - Tags: map[string]string{ - "git_branch": "${bundle.git.branch}", - }, - }, - }, - }, - }, - }, - } - - // Apply for the bundle prefix. - diags := bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle")) - require.NoError(t, diags.Error()) - - // The job settings should have been interpolated to an empty string. 
- require.Equal(t, "", b.Config.Resources.Jobs["job1"].JobSettings.Tags["git_branch"]) -} - func TestResolveVariableReferencesForPrimitiveNonStringFields(t *testing.T) { var diags diag.Diagnostics @@ -250,63 +194,6 @@ func TestResolveComplexVariable(t *testing.T) { require.Equal(t, 2, b.Config.Resources.Jobs["job1"].JobSettings.JobClusters[0].NewCluster.NumWorkers) } -func TestResolveComplexVariableReferencesToFields(t *testing.T) { - b := &bundle.Bundle{ - Config: config.Root{ - Bundle: config.Bundle{ - Name: "example", - }, - Variables: map[string]*variable.Variable{ - "cluster": { - Value: map[string]any{ - "node_type_id": "Standard_DS3_v2", - "num_workers": 2, - }, - Type: variable.VariableTypeComplex, - }, - }, - - Resources: config.Resources{ - Jobs: map[string]*resources.Job{ - "job1": { - JobSettings: &jobs.JobSettings{ - JobClusters: []jobs.JobCluster{ - { - NewCluster: compute.ClusterSpec{ - NodeTypeId: "random", - }, - }, - }, - }, - }, - }, - }, - }, - } - - ctx := context.Background() - - // Assign the variables to the dynamic configuration. - diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { - var p dyn.Path - var err error - - p = dyn.MustPathFromString("resources.jobs.job1.job_clusters[0].new_cluster") - v, err = dyn.SetByPath(v, p.Append(dyn.Key("node_type_id")), dyn.V("${var.cluster.node_type_id}")) - require.NoError(t, err) - - return v, nil - }) - return diag.FromErr(err) - }) - require.NoError(t, diags.Error()) - - diags = bundle.Apply(ctx, b, ResolveVariableReferences("bundle", "workspace", "variables")) - require.NoError(t, diags.Error()) - require.Equal(t, "Standard_DS3_v2", b.Config.Resources.Jobs["job1"].JobSettings.JobClusters[0].NewCluster.NodeTypeId) -} - func TestResolveComplexVariableReferencesWithComplexVariablesError(t *testing.T) { b := &bundle.Bundle{ Config: config.Root{ @@ -434,3 +321,57 @@ func TestResolveComplexVariableWithVarReference(t *testing.T) { require.NoError(t, diags.Error()) require.Equal(t, "cicd_template==1.0.0", b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].Libraries[0].Pypi.Package) } + +func TestResolveVariableReferencesWithSourceLinkedDeployment(t *testing.T) { + testCases := []struct { + enabled bool + assert func(t *testing.T, b *bundle.Bundle) + }{ + { + true, + func(t *testing.T, b *bundle.Bundle) { + // Variables that use workspace file path should have SyncRootValue during resolution phase + require.Equal(t, "sync/root/path", b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Configuration["source"]) + + // The file path itself should remain the same + require.Equal(t, "file/path", b.Config.Workspace.FilePath) + }, + }, + { + false, + func(t *testing.T, b *bundle.Bundle) { + require.Equal(t, "file/path", b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Configuration["source"]) + require.Equal(t, "file/path", b.Config.Workspace.FilePath) + }, + }, + } + + for _, testCase := range testCases { + b := &bundle.Bundle{ + SyncRootPath: "sync/root/path", + Config: config.Root{ + Presets: config.Presets{ + SourceLinkedDeployment: &testCase.enabled, + }, + Workspace: config.Workspace{ + FilePath: "file/path", + }, + Resources: config.Resources{ + Pipelines: map[string]*resources.Pipeline{ + "pipeline1": { + PipelineSpec: &pipelines.PipelineSpec{ + Configuration: map[string]string{ + "source": "${workspace.file_path}", + }, + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, 
ResolveVariableReferences("workspace")) + require.NoError(t, diags.Error()) + testCase.assert(t, b) + } +} diff --git a/bundle/config/resources/clusters.go b/bundle/config/resources/clusters.go index ba991e865..073f40a79 100644 --- a/bundle/config/resources/clusters.go +++ b/bundle/config/resources/clusters.go @@ -2,7 +2,6 @@ package resources import ( "context" - "fmt" "net/url" "github.com/databricks/cli/libs/log" @@ -45,7 +44,7 @@ func (s *Cluster) InitializeURL(baseURL url.URL) { if s.ID == "" { return } - baseURL.Path = fmt.Sprintf("compute/clusters/%s", s.ID) + baseURL.Path = "compute/clusters/" + s.ID s.URL = baseURL.String() } diff --git a/bundle/config/resources/job.go b/bundle/config/resources/job.go index 0aa41b2e8..76de78439 100644 --- a/bundle/config/resources/job.go +++ b/bundle/config/resources/job.go @@ -2,7 +2,6 @@ package resources import ( "context" - "fmt" "net/url" "strconv" @@ -52,7 +51,7 @@ func (j *Job) InitializeURL(baseURL url.URL) { if j.ID == "" { return } - baseURL.Path = fmt.Sprintf("jobs/%s", j.ID) + baseURL.Path = "jobs/" + j.ID j.URL = baseURL.String() } diff --git a/bundle/config/resources/mlflow_experiment.go b/bundle/config/resources/mlflow_experiment.go index 5d179ec0f..ea18ce114 100644 --- a/bundle/config/resources/mlflow_experiment.go +++ b/bundle/config/resources/mlflow_experiment.go @@ -2,7 +2,6 @@ package resources import ( "context" - "fmt" "net/url" "github.com/databricks/cli/libs/log" @@ -47,7 +46,7 @@ func (s *MlflowExperiment) InitializeURL(baseURL url.URL) { if s.ID == "" { return } - baseURL.Path = fmt.Sprintf("ml/experiments/%s", s.ID) + baseURL.Path = "ml/experiments/" + s.ID s.URL = baseURL.String() } diff --git a/bundle/config/resources/mlflow_model.go b/bundle/config/resources/mlflow_model.go index 72376f45d..69ae2d438 100644 --- a/bundle/config/resources/mlflow_model.go +++ b/bundle/config/resources/mlflow_model.go @@ -2,7 +2,6 @@ package resources import ( "context" - "fmt" "net/url" "github.com/databricks/cli/libs/log" @@ -47,7 +46,7 @@ func (s *MlflowModel) InitializeURL(baseURL url.URL) { if s.ID == "" { return } - baseURL.Path = fmt.Sprintf("ml/models/%s", s.ID) + baseURL.Path = "ml/models/" + s.ID s.URL = baseURL.String() } diff --git a/bundle/config/resources/model_serving_endpoint.go b/bundle/config/resources/model_serving_endpoint.go index a3c472b3f..8b1394d86 100644 --- a/bundle/config/resources/model_serving_endpoint.go +++ b/bundle/config/resources/model_serving_endpoint.go @@ -2,7 +2,6 @@ package resources import ( "context" - "fmt" "net/url" "github.com/databricks/cli/libs/log" @@ -55,7 +54,7 @@ func (s *ModelServingEndpoint) InitializeURL(baseURL url.URL) { if s.ID == "" { return } - baseURL.Path = fmt.Sprintf("ml/endpoints/%s", s.ID) + baseURL.Path = "ml/endpoints/" + s.ID s.URL = baseURL.String() } diff --git a/bundle/config/resources/permission.go b/bundle/config/resources/permission.go index 62e18a09e..fa1568601 100644 --- a/bundle/config/resources/permission.go +++ b/bundle/config/resources/permission.go @@ -25,5 +25,5 @@ func (p Permission) String() string { return fmt.Sprintf("level: %s, group_name: %s", p.Level, p.GroupName) } - return fmt.Sprintf("level: %s", p.Level) + return "level: " + p.Level } diff --git a/bundle/config/resources/pipeline.go b/bundle/config/resources/pipeline.go index eaa4c5368..5127d07ba 100644 --- a/bundle/config/resources/pipeline.go +++ b/bundle/config/resources/pipeline.go @@ -2,7 +2,6 @@ package resources import ( "context" - "fmt" "net/url" "github.com/databricks/cli/libs/log" @@ 
-47,7 +46,7 @@ func (p *Pipeline) InitializeURL(baseURL url.URL) { if p.ID == "" { return } - baseURL.Path = fmt.Sprintf("pipelines/%s", p.ID) + baseURL.Path = "pipelines/" + p.ID p.URL = baseURL.String() } diff --git a/bundle/config/resources/quality_monitor.go b/bundle/config/resources/quality_monitor.go index b1d7e08a5..88bc0a3e7 100644 --- a/bundle/config/resources/quality_monitor.go +++ b/bundle/config/resources/quality_monitor.go @@ -2,7 +2,6 @@ package resources import ( "context" - "fmt" "net/url" "strings" @@ -51,7 +50,7 @@ func (s *QualityMonitor) InitializeURL(baseURL url.URL) { if s.TableName == "" { return } - baseURL.Path = fmt.Sprintf("explore/data/%s", strings.ReplaceAll(s.TableName, ".", "/")) + baseURL.Path = "explore/data/" + strings.ReplaceAll(s.TableName, ".", "/") s.URL = baseURL.String() } diff --git a/bundle/config/resources/registered_model.go b/bundle/config/resources/registered_model.go index 8513a79ae..006eef773 100644 --- a/bundle/config/resources/registered_model.go +++ b/bundle/config/resources/registered_model.go @@ -2,7 +2,6 @@ package resources import ( "context" - "fmt" "net/url" "strings" @@ -57,7 +56,7 @@ func (s *RegisteredModel) InitializeURL(baseURL url.URL) { if s.ID == "" { return } - baseURL.Path = fmt.Sprintf("explore/data/models/%s", strings.ReplaceAll(s.ID, ".", "/")) + baseURL.Path = "explore/data/models/" + strings.ReplaceAll(s.ID, ".", "/") s.URL = baseURL.String() } diff --git a/bundle/config/resources/schema.go b/bundle/config/resources/schema.go index 8eadd7e46..b638907ac 100644 --- a/bundle/config/resources/schema.go +++ b/bundle/config/resources/schema.go @@ -2,7 +2,7 @@ package resources import ( "context" - "fmt" + "errors" "net/url" "strings" @@ -26,7 +26,7 @@ type Schema struct { } func (s *Schema) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { - return false, fmt.Errorf("schema.Exists() is not supported") + return false, errors.New("schema.Exists() is not supported") } func (s *Schema) TerraformResourceName() string { @@ -37,7 +37,7 @@ func (s *Schema) InitializeURL(baseURL url.URL) { if s.ID == "" { return } - baseURL.Path = fmt.Sprintf("explore/data/%s", strings.ReplaceAll(s.ID, ".", "/")) + baseURL.Path = "explore/data/" + strings.ReplaceAll(s.ID, ".", "/") s.URL = baseURL.String() } diff --git a/bundle/config/resources/volume.go b/bundle/config/resources/volume.go index cae2a3463..882b7107d 100644 --- a/bundle/config/resources/volume.go +++ b/bundle/config/resources/volume.go @@ -2,7 +2,7 @@ package resources import ( "context" - "fmt" + "errors" "net/url" "strings" @@ -34,7 +34,7 @@ func (v Volume) MarshalJSON() ([]byte, error) { } func (v *Volume) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { - return false, fmt.Errorf("volume.Exists() is not supported") + return false, errors.New("volume.Exists() is not supported") } func (v *Volume) TerraformResourceName() string { @@ -45,7 +45,7 @@ func (v *Volume) InitializeURL(baseURL url.URL) { if v.ID == "" { return } - baseURL.Path = fmt.Sprintf("explore/data/volumes/%s", strings.ReplaceAll(v.ID, ".", "/")) + baseURL.Path = "explore/data/volumes/" + strings.ReplaceAll(v.ID, ".", "/") v.URL = baseURL.String() } diff --git a/bundle/config/validate/folder_permissions.go b/bundle/config/validate/folder_permissions.go index 5b28d599e..7b12b4b16 100644 --- a/bundle/config/validate/folder_permissions.go +++ b/bundle/config/validate/folder_permissions.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "path" + "strconv" 
"github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/libraries" @@ -60,7 +61,7 @@ func checkFolderPermission(ctx context.Context, b bundle.ReadOnlyBundle, folderP } objPermissions, err := w.GetPermissions(ctx, workspace.GetWorkspaceObjectPermissionsRequest{ - WorkspaceObjectId: fmt.Sprint(obj.ObjectId), + WorkspaceObjectId: strconv.FormatInt(obj.ObjectId, 10), WorkspaceObjectType: "directories", }) if err != nil { diff --git a/bundle/config/validate/unique_resource_keys.go b/bundle/config/validate/unique_resource_keys.go index 50295375b..d80c5d632 100644 --- a/bundle/config/validate/unique_resource_keys.go +++ b/bundle/config/validate/unique_resource_keys.go @@ -2,7 +2,6 @@ package validate import ( "context" - "fmt" "sort" "github.com/databricks/cli/bundle" @@ -102,7 +101,7 @@ func (m *uniqueResourceKeys) Apply(ctx context.Context, b *bundle.Bundle) diag.D // If there are multiple resources with the same key, report an error. diags = append(diags, diag.Diagnostic{ Severity: diag.Error, - Summary: fmt.Sprintf("multiple resources have been defined with the same key: %s", k), + Summary: "multiple resources have been defined with the same key: " + k, Locations: v.locations, Paths: v.paths, }) diff --git a/bundle/config/validate/validate_artifact_path.go b/bundle/config/validate/validate_artifact_path.go index 5bab99ccf..aa4492670 100644 --- a/bundle/config/validate/validate_artifact_path.go +++ b/bundle/config/validate/validate_artifact_path.go @@ -68,7 +68,7 @@ func findVolumeInBundle(r config.Root, catalogName, schemaName, volumeName strin if v.SchemaName != schemaName && !isSchemaDefinedInBundle { continue } - pathString := fmt.Sprintf("resources.volumes.%s", k) + pathString := "resources.volumes." + k return dyn.MustPathFromString(pathString), r.GetLocations(pathString), true } return nil, nil, false diff --git a/bundle/config/validate/validate_artifact_path_test.go b/bundle/config/validate/validate_artifact_path_test.go index 8fb5c9618..e1ae6af34 100644 --- a/bundle/config/validate/validate_artifact_path_test.go +++ b/bundle/config/validate/validate_artifact_path_test.go @@ -2,7 +2,6 @@ package validate import ( "context" - "fmt" "testing" "github.com/databricks/cli/bundle" @@ -152,7 +151,7 @@ func TestExtractVolumeFromPath(t *testing.T) { for _, p := range invalidVolumePaths() { _, _, _, err := extractVolumeFromPath(p) - assert.EqualError(t, err, fmt.Sprintf("expected UC volume path to be in the format /Volumes////..., got %s", p)) + assert.EqualError(t, err, "expected UC volume path to be in the format /Volumes////..., got "+p) } } @@ -171,7 +170,7 @@ func TestValidateArtifactPathWithInvalidPaths(t *testing.T) { diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), ValidateArtifactPath()) require.Equal(t, diag.Diagnostics{{ Severity: diag.Error, - Summary: fmt.Sprintf("expected UC volume path to be in the format /Volumes////..., got %s", p), + Summary: "expected UC volume path to be in the format /Volumes////..., got " + p, Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}}, Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")}, }}, diags) diff --git a/bundle/config/variable/lookup.go b/bundle/config/variable/lookup.go index 37e380f18..71c8512e3 100755 --- a/bundle/config/variable/lookup.go +++ b/bundle/config/variable/lookup.go @@ -2,7 +2,7 @@ package variable import ( "context" - "fmt" + "errors" "github.com/databricks/databricks-sdk-go" ) @@ -83,11 +83,11 @@ func (l *Lookup) constructResolver() (resolver, error) { 
switch len(resolvers) { case 0: - return nil, fmt.Errorf("no valid lookup fields provided") + return nil, errors.New("no valid lookup fields provided") case 1: return resolvers[0], nil default: - return nil, fmt.Errorf("exactly one lookup field must be provided") + return nil, errors.New("exactly one lookup field must be provided") } } diff --git a/bundle/config/variable/resolve_alert.go b/bundle/config/variable/resolve_alert.go index be83e81fa..507306aa0 100644 --- a/bundle/config/variable/resolve_alert.go +++ b/bundle/config/variable/resolve_alert.go @@ -2,7 +2,6 @@ package variable import ( "context" - "fmt" "github.com/databricks/databricks-sdk-go" ) @@ -16,9 +15,9 @@ func (l resolveAlert) Resolve(ctx context.Context, w *databricks.WorkspaceClient if err != nil { return "", err } - return fmt.Sprint(entity.Id), nil + return entity.Id, nil } func (l resolveAlert) String() string { - return fmt.Sprintf("alert: %s", l.name) + return "alert: " + l.name } diff --git a/bundle/config/variable/resolve_cluster.go b/bundle/config/variable/resolve_cluster.go index a8cf3fe7f..51278aef5 100644 --- a/bundle/config/variable/resolve_cluster.go +++ b/bundle/config/variable/resolve_cluster.go @@ -42,5 +42,5 @@ func (l resolveCluster) Resolve(ctx context.Context, w *databricks.WorkspaceClie } func (l resolveCluster) String() string { - return fmt.Sprintf("cluster: %s", l.name) + return "cluster: " + l.name } diff --git a/bundle/config/variable/resolve_cluster_policy.go b/bundle/config/variable/resolve_cluster_policy.go index b19380a63..94fd892b2 100644 --- a/bundle/config/variable/resolve_cluster_policy.go +++ b/bundle/config/variable/resolve_cluster_policy.go @@ -2,7 +2,6 @@ package variable import ( "context" - "fmt" "github.com/databricks/databricks-sdk-go" ) @@ -16,9 +15,9 @@ func (l resolveClusterPolicy) Resolve(ctx context.Context, w *databricks.Workspa if err != nil { return "", err } - return fmt.Sprint(entity.PolicyId), nil + return entity.PolicyId, nil } func (l resolveClusterPolicy) String() string { - return fmt.Sprintf("cluster-policy: %s", l.name) + return "cluster-policy: " + l.name } diff --git a/bundle/config/variable/resolve_dashboard.go b/bundle/config/variable/resolve_dashboard.go index 44fd45197..2979716ce 100644 --- a/bundle/config/variable/resolve_dashboard.go +++ b/bundle/config/variable/resolve_dashboard.go @@ -2,7 +2,6 @@ package variable import ( "context" - "fmt" "github.com/databricks/databricks-sdk-go" ) @@ -16,9 +15,9 @@ func (l resolveDashboard) Resolve(ctx context.Context, w *databricks.WorkspaceCl if err != nil { return "", err } - return fmt.Sprint(entity.Id), nil + return entity.Id, nil } func (l resolveDashboard) String() string { - return fmt.Sprintf("dashboard: %s", l.name) + return "dashboard: " + l.name } diff --git a/bundle/config/variable/resolve_instance_pool.go b/bundle/config/variable/resolve_instance_pool.go index cbf0775c9..600b47a50 100644 --- a/bundle/config/variable/resolve_instance_pool.go +++ b/bundle/config/variable/resolve_instance_pool.go @@ -2,7 +2,6 @@ package variable import ( "context" - "fmt" "github.com/databricks/databricks-sdk-go" ) @@ -16,9 +15,9 @@ func (l resolveInstancePool) Resolve(ctx context.Context, w *databricks.Workspac if err != nil { return "", err } - return fmt.Sprint(entity.InstancePoolId), nil + return entity.InstancePoolId, nil } func (l resolveInstancePool) String() string { - return fmt.Sprintf("instance-pool: %s", l.name) + return "instance-pool: " + l.name } diff --git a/bundle/config/variable/resolve_job.go 
b/bundle/config/variable/resolve_job.go index 3def64888..4fe6ae3e7 100644 --- a/bundle/config/variable/resolve_job.go +++ b/bundle/config/variable/resolve_job.go @@ -2,7 +2,7 @@ package variable import ( "context" - "fmt" + "strconv" "github.com/databricks/databricks-sdk-go" ) @@ -16,9 +16,9 @@ func (l resolveJob) Resolve(ctx context.Context, w *databricks.WorkspaceClient) if err != nil { return "", err } - return fmt.Sprint(entity.JobId), nil + return strconv.FormatInt(entity.JobId, 10), nil } func (l resolveJob) String() string { - return fmt.Sprintf("job: %s", l.name) + return "job: " + l.name } diff --git a/bundle/config/variable/resolve_metastore.go b/bundle/config/variable/resolve_metastore.go index 958e43787..8a0a8c7ed 100644 --- a/bundle/config/variable/resolve_metastore.go +++ b/bundle/config/variable/resolve_metastore.go @@ -2,7 +2,6 @@ package variable import ( "context" - "fmt" "github.com/databricks/databricks-sdk-go" ) @@ -16,9 +15,9 @@ func (l resolveMetastore) Resolve(ctx context.Context, w *databricks.WorkspaceCl if err != nil { return "", err } - return fmt.Sprint(entity.MetastoreId), nil + return entity.MetastoreId, nil } func (l resolveMetastore) String() string { - return fmt.Sprintf("metastore: %s", l.name) + return "metastore: " + l.name } diff --git a/bundle/config/variable/resolve_notification_destination.go b/bundle/config/variable/resolve_notification_destination.go index 4c4cd892a..4696a52c8 100644 --- a/bundle/config/variable/resolve_notification_destination.go +++ b/bundle/config/variable/resolve_notification_destination.go @@ -42,5 +42,5 @@ func (l resolveNotificationDestination) Resolve(ctx context.Context, w *databric } func (l resolveNotificationDestination) String() string { - return fmt.Sprintf("notification-destination: %s", l.name) + return "notification-destination: " + l.name } diff --git a/bundle/config/variable/resolve_notification_destination_test.go b/bundle/config/variable/resolve_notification_destination_test.go index 2b8201d15..f44b2f3e9 100644 --- a/bundle/config/variable/resolve_notification_destination_test.go +++ b/bundle/config/variable/resolve_notification_destination_test.go @@ -2,7 +2,7 @@ package variable import ( "context" - "fmt" + "errors" "testing" "github.com/databricks/databricks-sdk-go/experimental/mocks" @@ -35,7 +35,7 @@ func TestResolveNotificationDestination_ResolveError(t *testing.T) { api := m.GetMockNotificationDestinationsAPI() api.EXPECT(). ListAll(mock.Anything, mock.Anything). 
- Return(nil, fmt.Errorf("bad")) + Return(nil, errors.New("bad")) ctx := context.Background() l := resolveNotificationDestination{name: "destination"} diff --git a/bundle/config/variable/resolve_pipeline.go b/bundle/config/variable/resolve_pipeline.go index cabc620da..33b14530d 100644 --- a/bundle/config/variable/resolve_pipeline.go +++ b/bundle/config/variable/resolve_pipeline.go @@ -2,7 +2,6 @@ package variable import ( "context" - "fmt" "github.com/databricks/databricks-sdk-go" ) @@ -16,9 +15,9 @@ func (l resolvePipeline) Resolve(ctx context.Context, w *databricks.WorkspaceCli if err != nil { return "", err } - return fmt.Sprint(entity.PipelineId), nil + return entity.PipelineId, nil } func (l resolvePipeline) String() string { - return fmt.Sprintf("pipeline: %s", l.name) + return "pipeline: " + l.name } diff --git a/bundle/config/variable/resolve_query.go b/bundle/config/variable/resolve_query.go index 602ff8deb..88f653dc6 100644 --- a/bundle/config/variable/resolve_query.go +++ b/bundle/config/variable/resolve_query.go @@ -2,7 +2,6 @@ package variable import ( "context" - "fmt" "github.com/databricks/databricks-sdk-go" ) @@ -16,9 +15,9 @@ func (l resolveQuery) Resolve(ctx context.Context, w *databricks.WorkspaceClient if err != nil { return "", err } - return fmt.Sprint(entity.Id), nil + return entity.Id, nil } func (l resolveQuery) String() string { - return fmt.Sprintf("query: %s", l.name) + return "query: " + l.name } diff --git a/bundle/config/variable/resolve_service_principal.go b/bundle/config/variable/resolve_service_principal.go index 3bea4314b..03b8e3089 100644 --- a/bundle/config/variable/resolve_service_principal.go +++ b/bundle/config/variable/resolve_service_principal.go @@ -2,7 +2,6 @@ package variable import ( "context" - "fmt" "github.com/databricks/databricks-sdk-go" ) @@ -16,9 +15,9 @@ func (l resolveServicePrincipal) Resolve(ctx context.Context, w *databricks.Work if err != nil { return "", err } - return fmt.Sprint(entity.ApplicationId), nil + return entity.ApplicationId, nil } func (l resolveServicePrincipal) String() string { - return fmt.Sprintf("service-principal: %s", l.name) + return "service-principal: " + l.name } diff --git a/bundle/config/variable/resolve_warehouse.go b/bundle/config/variable/resolve_warehouse.go index fbd3663a2..cabdb1160 100644 --- a/bundle/config/variable/resolve_warehouse.go +++ b/bundle/config/variable/resolve_warehouse.go @@ -2,7 +2,6 @@ package variable import ( "context" - "fmt" "github.com/databricks/databricks-sdk-go" ) @@ -16,9 +15,9 @@ func (l resolveWarehouse) Resolve(ctx context.Context, w *databricks.WorkspaceCl if err != nil { return "", err } - return fmt.Sprint(entity.Id), nil + return entity.Id, nil } func (l resolveWarehouse) String() string { - return fmt.Sprintf("warehouse: %s", l.name) + return "warehouse: " + l.name } diff --git a/bundle/config/variable/variable.go b/bundle/config/variable/variable.go index 2362ad10d..95a68cfeb 100644 --- a/bundle/config/variable/variable.go +++ b/bundle/config/variable/variable.go @@ -1,6 +1,7 @@ package variable import ( + "errors" "fmt" "reflect" ) @@ -68,7 +69,7 @@ func (v *Variable) Set(val VariableValue) error { switch rv.Kind() { case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map: if v.Type != VariableTypeComplex { - return fmt.Errorf("variable type is not complex") + return errors.New("variable type is not complex") } } diff --git a/bundle/deploy/metadata/compute.go b/bundle/deploy/metadata/compute.go index bc8767de4..b47baa6b2 100644 --- 
a/bundle/deploy/metadata/compute.go +++ b/bundle/deploy/metadata/compute.go @@ -54,5 +54,8 @@ func (m *compute) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { // Set file upload destination of the bundle in metadata b.Metadata.Config.Workspace.FilePath = b.Config.Workspace.FilePath + if config.IsExplicitlyEnabled(b.Config.Presets.SourceLinkedDeployment) { + b.Metadata.Config.Workspace.FilePath = b.SyncRootPath + } return nil } diff --git a/bundle/deploy/metadata/compute_test.go b/bundle/deploy/metadata/compute_test.go index 2c2c72376..c6fa9bddb 100644 --- a/bundle/deploy/metadata/compute_test.go +++ b/bundle/deploy/metadata/compute_test.go @@ -97,3 +97,24 @@ func TestComputeMetadataMutator(t *testing.T) { assert.Equal(t, expectedMetadata, b.Metadata) } + +func TestComputeMetadataMutatorSourceLinked(t *testing.T) { + syncRootPath := "/Users/shreyas.goenka@databricks.com/source" + enabled := true + b := &bundle.Bundle{ + SyncRootPath: syncRootPath, + Config: config.Root{ + Presets: config.Presets{ + SourceLinkedDeployment: &enabled, + }, + Workspace: config.Workspace{ + FilePath: "/Users/shreyas.goenka@databricks.com/files", + }, + }, + } + + diags := bundle.Apply(context.Background(), b, Compute()) + require.NoError(t, diags.Error()) + + assert.Equal(t, syncRootPath, b.Metadata.Config.Workspace.FilePath) +} diff --git a/bundle/deploy/state.go b/bundle/deploy/state.go index a131ab9c3..6e285034a 100644 --- a/bundle/deploy/state.go +++ b/bundle/deploy/state.go @@ -3,6 +3,7 @@ package deploy import ( "context" "encoding/json" + "errors" "fmt" "io" "io/fs" @@ -95,7 +96,7 @@ func (e *entry) Type() fs.FileMode { func (e *entry) Info() (fs.FileInfo, error) { if e.info == nil { - return nil, fmt.Errorf("no info available") + return nil, errors.New("no info available") } return e.info, nil } diff --git a/bundle/deploy/terraform/check_dashboards_modified_remotely.go b/bundle/deploy/terraform/check_dashboards_modified_remotely.go index f263e8a7f..66914af54 100644 --- a/bundle/deploy/terraform/check_dashboards_modified_remotely.go +++ b/bundle/deploy/terraform/check_dashboards_modified_remotely.go @@ -72,7 +72,7 @@ func (l *checkDashboardsModifiedRemotely) Apply(ctx context.Context, b *bundle.B continue } - path := dyn.MustPathFromString(fmt.Sprintf("resources.dashboards.%s", dashboard.Name)) + path := dyn.MustPathFromString("resources.dashboards." + dashboard.Name) loc := b.Config.GetLocation(path.String()) actual, err := b.WorkspaceClient().Lakeview.GetByDashboardId(ctx, dashboard.ID) if err != nil { diff --git a/bundle/deploy/terraform/check_dashboards_modified_remotely_test.go b/bundle/deploy/terraform/check_dashboards_modified_remotely_test.go index 1bed3b1be..46bdc1f38 100644 --- a/bundle/deploy/terraform/check_dashboards_modified_remotely_test.go +++ b/bundle/deploy/terraform/check_dashboards_modified_remotely_test.go @@ -2,7 +2,7 @@ package terraform import ( "context" - "fmt" + "errors" "path/filepath" "testing" @@ -122,7 +122,7 @@ func TestCheckDashboardsModifiedRemotely_ExistingStateFailureToGet(t *testing.T) dashboardsAPI := m.GetMockLakeviewAPI() dashboardsAPI.EXPECT(). GetByDashboardId(mock.Anything, "id1"). - Return(nil, fmt.Errorf("failure")). + Return(nil, errors.New("failure")). 
Once() b.SetWorkpaceClient(m.WorkspaceClient) diff --git a/bundle/deploy/terraform/init.go b/bundle/deploy/terraform/init.go index 366f0be8c..e69f0bf0f 100644 --- a/bundle/deploy/terraform/init.go +++ b/bundle/deploy/terraform/init.go @@ -230,7 +230,7 @@ func setUserAgentExtraEnvVar(environ map[string]string, b *bundle.Bundle) error // Add "cli" to the user agent in set by the Databricks Terraform provider. // This will allow us to attribute downstream requests made by the Databricks // Terraform provider to the CLI. - products := []string{fmt.Sprintf("cli/%s", build.GetInfo().Version)} + products := []string{"cli/" + build.GetInfo().Version} if experimental := b.Config.Experimental; experimental != nil { if experimental.PyDABs.Enabled { products = append(products, "databricks-pydabs/0.0.0") diff --git a/bundle/deploy/terraform/load.go b/bundle/deploy/terraform/load.go index 3fb76855e..1c563fa77 100644 --- a/bundle/deploy/terraform/load.go +++ b/bundle/deploy/terraform/load.go @@ -2,6 +2,7 @@ package terraform import ( "context" + "errors" "fmt" "slices" @@ -58,7 +59,7 @@ func (l *load) validateState(state *resourcesState) error { } if len(state.Resources) == 0 && slices.Contains(l.modes, ErrorOnEmptyState) { - return fmt.Errorf("no deployment state. Did you forget to run 'databricks bundle deploy'?") + return errors.New("no deployment state. Did you forget to run 'databricks bundle deploy'?") } return nil diff --git a/bundle/internal/schema/annotations.yml b/bundle/internal/schema/annotations.yml index 84f6753e3..5283a431b 100644 --- a/bundle/internal/schema/annotations.yml +++ b/bundle/internal/schema/annotations.yml @@ -69,6 +69,9 @@ github.com/databricks/cli/bundle/config.Experimental: "pydabs": "description": |- The PyDABs configuration. + "python": + "description": |- + Configures loading of Python code defined with 'databricks-bundles' package. "python_wheel_wrapper": "description": |- Whether to use a Python wheel wrapper @@ -125,6 +128,24 @@ github.com/databricks/cli/bundle/config.PyDABs: "venv_path": "description": |- The Python virtual environment path +github.com/databricks/cli/bundle/config.Python: + "mutators": + "description": |- + Mutators contains a list of fully qualified function paths to mutator functions. + + Example: ["my_project.mutators:add_default_cluster"] + "resources": + "description": |- + Resources contains a list of fully qualified function paths to load resources + defined in Python code. + + Example: ["my_project.resources:load_resources"] + "venv_path": + "description": |- + VEnvPath is path to the virtual environment. + + If enabled, Python code will execute within this environment. If disabled, + it defaults to using the Python interpreter available in the current shell. 
github.com/databricks/cli/bundle/config.Resources: "clusters": "description": |- diff --git a/bundle/libraries/helpers.go b/bundle/libraries/helpers.go index 2149e5885..5a1a9511c 100644 --- a/bundle/libraries/helpers.go +++ b/bundle/libraries/helpers.go @@ -1,7 +1,7 @@ package libraries import ( - "fmt" + "errors" "github.com/databricks/databricks-sdk-go/service/compute" ) @@ -20,5 +20,5 @@ func libraryPath(library *compute.Library) (string, error) { return library.Requirements, nil } - return "", fmt.Errorf("not supported library type") + return "", errors.New("not supported library type") } diff --git a/bundle/permissions/workspace_root.go b/bundle/permissions/workspace_root.go index 4ac0d38a5..828b12f50 100644 --- a/bundle/permissions/workspace_root.go +++ b/bundle/permissions/workspace_root.go @@ -3,6 +3,7 @@ package permissions import ( "context" "fmt" + "strconv" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/libraries" @@ -78,7 +79,7 @@ func setPermissions(ctx context.Context, w workspace.WorkspaceInterface, path st } _, err = w.SetPermissions(ctx, workspace.WorkspaceObjectPermissionsRequest{ - WorkspaceObjectId: fmt.Sprint(obj.ObjectId), + WorkspaceObjectId: strconv.FormatInt(obj.ObjectId, 10), WorkspaceObjectType: "directories", AccessControlList: permissions, }) diff --git a/bundle/phases/deploy.go b/bundle/phases/deploy.go index 2dc9623bd..16595611f 100644 --- a/bundle/phases/deploy.go +++ b/bundle/phases/deploy.go @@ -2,7 +2,7 @@ package phases import ( "context" - "fmt" + "errors" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/artifacts" @@ -54,7 +54,7 @@ func filterDeleteOrRecreateActions(changes []*tfjson.ResourceChange, resourceTyp func approvalForDeploy(ctx context.Context, b *bundle.Bundle) (bool, error) { tf := b.Terraform if tf == nil { - return false, fmt.Errorf("terraform not initialized") + return false, errors.New("terraform not initialized") } // read plan file @@ -111,7 +111,7 @@ is removed from the catalog, but the underlying files are not deleted:` } if !cmdio.IsPromptSupported(ctx) { - return false, fmt.Errorf("the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed") + return false, errors.New("the deployment requires destructive actions, but current console does not support prompting. 
Please specify --auto-approve if you would like to skip prompts and proceed") } cmdio.LogString(ctx, "") diff --git a/bundle/phases/destroy.go b/bundle/phases/destroy.go index 6eb8b6a01..05a41dea2 100644 --- a/bundle/phases/destroy.go +++ b/bundle/phases/destroy.go @@ -3,7 +3,6 @@ package phases import ( "context" "errors" - "fmt" "net/http" "github.com/databricks/cli/bundle" @@ -34,7 +33,7 @@ func assertRootPathExists(ctx context.Context, b *bundle.Bundle) (bool, error) { func approvalForDestroy(ctx context.Context, b *bundle.Bundle) (bool, error) { tf := b.Terraform if tf == nil { - return false, fmt.Errorf("terraform not initialized") + return false, errors.New("terraform not initialized") } // read plan file @@ -63,7 +62,7 @@ func approvalForDestroy(ctx context.Context, b *bundle.Bundle) (bool, error) { } - cmdio.LogString(ctx, fmt.Sprintf("All files and directories at the following location will be deleted: %s", b.Config.Workspace.RootPath)) + cmdio.LogString(ctx, "All files and directories at the following location will be deleted: "+b.Config.Workspace.RootPath) cmdio.LogString(ctx, "") if b.AutoApprove { diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index 6fa0e5fed..f7b3cd608 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -41,6 +41,10 @@ func Initialize() bundle.Mutator { mutator.PopulateCurrentUser(), mutator.LoadGitDetails(), + // This mutator needs to be run before variable interpolation and defining default workspace paths + // because it affects how workspace variables are resolved. + mutator.ApplySourceLinkedDeploymentPreset(), + mutator.DefineDefaultWorkspaceRoot(), mutator.ExpandWorkspaceRoot(), mutator.DefineDefaultWorkspacePaths(), @@ -51,10 +55,13 @@ func Initialize() bundle.Mutator { mutator.RewriteWorkspacePrefix(), mutator.SetVariables(), + // Intentionally placed before ResolveVariableReferencesInLookup, ResolveResourceReferences, // ResolveVariableReferencesInComplexVariables and ResolveVariableReferences. // See what is expected in PythonMutatorPhaseInit doc pythonmutator.PythonMutator(pythonmutator.PythonMutatorPhaseInit), + pythonmutator.PythonMutator(pythonmutator.PythonMutatorPhaseLoadResources), + pythonmutator.PythonMutator(pythonmutator.PythonMutatorPhaseApplyMutators), mutator.ResolveVariableReferencesInLookup(), mutator.ResolveResourceReferences(), mutator.ResolveVariableReferencesInComplexVariables(), diff --git a/bundle/root.go b/bundle/root.go index efc21e0ca..9ea9a8c13 100644 --- a/bundle/root.go +++ b/bundle/root.go @@ -2,6 +2,7 @@ package bundle import ( "context" + "errors" "fmt" "os" @@ -21,7 +22,7 @@ func getRootEnv(ctx context.Context) (string, error) { } stat, err := os.Stat(path) if err == nil && !stat.IsDir() { - err = fmt.Errorf("not a directory") + err = errors.New("not a directory") } if err != nil { return "", fmt.Errorf(`invalid bundle root %s="%s": %w`, env.RootVariable, path, err) diff --git a/bundle/run/job.go b/bundle/run/job.go index b43db9184..2489ca619 100644 --- a/bundle/run/job.go +++ b/bundle/run/job.go @@ -3,6 +3,7 @@ package run import ( "context" "encoding/json" + "errors" "fmt" "strconv" "time" @@ -181,13 +182,13 @@ func (r *jobRunner) Run(ctx context.Context, opts *Options) (output.RunOutput, e // callback to log progress events. 
Called on every poll request progressLogger, ok := cmdio.FromContext(ctx) if !ok { - return nil, fmt.Errorf("no progress logger found") + return nil, errors.New("no progress logger found") } logProgress := logProgressCallback(ctx, progressLogger) waiter, err := w.Jobs.RunNow(ctx, *req) if err != nil { - return nil, fmt.Errorf("cannot start job") + return nil, errors.New("cannot start job") } if opts.NoWait { @@ -266,7 +267,7 @@ func (r *jobRunner) convertPythonParams(opts *Options) error { if len(opts.Job.pythonParams) > 0 { if _, ok := opts.Job.notebookParams["__python_params"]; ok { - return fmt.Errorf("can't use __python_params as notebook param, the name is reserved for internal use") + return errors.New("can't use __python_params as notebook param, the name is reserved for internal use") } p, err := json.Marshal(opts.Job.pythonParams) if err != nil { diff --git a/bundle/run/job_options.go b/bundle/run/job_options.go index 6a03dff95..7db8e72cd 100644 --- a/bundle/run/job_options.go +++ b/bundle/run/job_options.go @@ -1,7 +1,7 @@ package run import ( - "fmt" + "errors" "strconv" "github.com/databricks/cli/bundle/config/resources" @@ -60,16 +60,16 @@ func (o *JobOptions) hasJobParametersConfigured() bool { // Validate returns if the combination of options is valid. func (o *JobOptions) Validate(job *resources.Job) error { if job == nil { - return fmt.Errorf("job not defined") + return errors.New("job not defined") } // Ensure mutual exclusion on job parameters and task parameters. hasJobParams := len(job.Parameters) > 0 if hasJobParams && o.hasTaskParametersConfigured() { - return fmt.Errorf("the job to run defines job parameters; specifying task parameters is not allowed") + return errors.New("the job to run defines job parameters; specifying task parameters is not allowed") } if !hasJobParams && o.hasJobParametersConfigured() { - return fmt.Errorf("the job to run does not define job parameters; specifying job parameters is not allowed") + return errors.New("the job to run does not define job parameters; specifying job parameters is not allowed") } return nil @@ -80,7 +80,7 @@ func (o *JobOptions) validatePipelineParams() (*jobs.PipelineParams, error) { return nil, nil } - defaultErr := fmt.Errorf("job run argument --pipeline-params only supports `full_refresh=`") + defaultErr := errors.New("job run argument --pipeline-params only supports `full_refresh=`") v, ok := o.pipelineParams["full_refresh"] if !ok { return nil, defaultErr diff --git a/bundle/run/output/job.go b/bundle/run/output/job.go index 6199ac2f7..2ac974cd5 100644 --- a/bundle/run/output/job.go +++ b/bundle/run/output/job.go @@ -47,7 +47,7 @@ func (out *JobOutput) String() (string, error) { } result.WriteString("=======\n") result.WriteString(fmt.Sprintf("Task %s:\n", v.TaskKey)) - result.WriteString(fmt.Sprintf("%s\n", taskString)) + result.WriteString(taskString + "\n") } return result.String(), nil } diff --git a/bundle/run/output/task.go b/bundle/run/output/task.go index 1ef78a8c3..53b989e88 100644 --- a/bundle/run/output/task.go +++ b/bundle/run/output/task.go @@ -2,7 +2,6 @@ package output import ( "encoding/json" - "fmt" "github.com/databricks/databricks-sdk-go/service/jobs" ) @@ -27,7 +26,7 @@ func structToString(val any) (string, error) { func (out *NotebookOutput) String() (string, error) { if out.Truncated { - return fmt.Sprintf("%s\n[truncated...]\n", out.Result), nil + return out.Result + "\n[truncated...]\n", nil } return out.Result, nil } @@ -42,7 +41,7 @@ func (out *DbtOutput) String() (string, error) { // 
JSON is used because it's a convenient representation. // If user needs machine parsable output, they can use the --output json // flag - return fmt.Sprintf("Dbt Task Output:\n%s", outputString), nil + return "Dbt Task Output:\n" + outputString, nil } func (out *SqlOutput) String() (string, error) { @@ -55,12 +54,12 @@ func (out *SqlOutput) String() (string, error) { // JSON is used because it's a convenient representation. // If user needs machine parsable output, they can use the --output json // flag - return fmt.Sprintf("SQL Task Output:\n%s", outputString), nil + return "SQL Task Output:\n" + outputString, nil } func (out *LogsOutput) String() (string, error) { if out.LogsTruncated { - return fmt.Sprintf("%s\n[truncated...]\n", out.Logs), nil + return out.Logs + "\n[truncated...]\n", nil } return out.Logs, nil } diff --git a/bundle/run/pipeline.go b/bundle/run/pipeline.go index c447f044a..bdcf0f142 100644 --- a/bundle/run/pipeline.go +++ b/bundle/run/pipeline.go @@ -2,6 +2,7 @@ package run import ( "context" + "errors" "fmt" "time" @@ -33,7 +34,7 @@ func (r *pipelineRunner) logEvent(ctx context.Context, event pipelines.PipelineE if event.Error != nil && len(event.Error.Exceptions) > 0 { logString += "trace for most recent exception: \n" for i := range len(event.Error.Exceptions) { - logString += fmt.Sprintf("%s\n", event.Error.Exceptions[i].Message) + logString += event.Error.Exceptions[i].Message + "\n" } } if logString != "" { @@ -107,7 +108,7 @@ func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (output.RunOutp updateTracker := progress.NewUpdateTracker(pipelineID, updateID, w) progressLogger, ok := cmdio.FromContext(ctx) if !ok { - return nil, fmt.Errorf("no progress logger found") + return nil, errors.New("no progress logger found") } // Log the pipeline update URL as soon as it is available. 
@@ -144,7 +145,7 @@ func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (output.RunOutp if state == pipelines.UpdateInfoStateCanceled { log.Infof(ctx, "Update was cancelled!") - return nil, fmt.Errorf("update cancelled") + return nil, errors.New("update cancelled") } if state == pipelines.UpdateInfoStateFailed { log.Infof(ctx, "Update has failed!") @@ -152,7 +153,7 @@ func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (output.RunOutp if err != nil { return nil, err } - return nil, fmt.Errorf("update failed") + return nil, errors.New("update failed") } if state == pipelines.UpdateInfoStateCompleted { log.Infof(ctx, "Update has completed successfully!") diff --git a/bundle/run/progress/pipeline.go b/bundle/run/progress/pipeline.go index b82dd7abd..ce92c4cde 100644 --- a/bundle/run/progress/pipeline.go +++ b/bundle/run/progress/pipeline.go @@ -33,7 +33,7 @@ func (event *ProgressEvent) String() string { // construct error string if level=`Error` if event.Level == pipelines.EventLevelError && event.Error != nil { for _, exception := range event.Error.Exceptions { - result.WriteString(fmt.Sprintf("\n%s", exception.Message)) + result.WriteString("\n" + exception.Message) } } return result.String() diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index 9a352ebb2..2f78ffcca 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -1100,6 +1100,10 @@ "description": "The PyDABs configuration.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config.PyDABs" }, + "python": { + "description": "Configures loading of Python code defined with 'databricks-bundles' package.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Python" + }, "python_wheel_wrapper": { "description": "Whether to use a Python wheel wrapper", "$ref": "#/$defs/bool" @@ -1234,6 +1238,36 @@ } ] }, + "config.Python": { + "oneOf": [ + { + "type": "object", + "properties": { + "mutators": { + "description": "Mutators contains a list of fully qualified function paths to mutator functions.\n\nExample: [\"my_project.mutators:add_default_cluster\"]", + "$ref": "#/$defs/slice/string" + }, + "resources": { + "description": "Resources contains a list of fully qualified function paths to load resources\ndefined in Python code.\n\nExample: [\"my_project.resources:load_resources\"]", + "$ref": "#/$defs/slice/string" + }, + "venv_path": { + "description": "VEnvPath is path to the virtual environment.\n\nIf enabled, Python code will execute within this environment. 
If disabled,\nit defaults to using the Python interpreter available in the current shell.", + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false, + "required": [ + "resources", + "mutators" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "config.Resources": { "oneOf": [ { diff --git a/bundle/tests/clusters_test.go b/bundle/tests/clusters_test.go deleted file mode 100644 index def8a2a31..000000000 --- a/bundle/tests/clusters_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package config_tests - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestClusters(t *testing.T) { - b := load(t, "./clusters") - assert.Equal(t, "clusters", b.Config.Bundle.Name) - - cluster := b.Config.Resources.Clusters["foo"] - assert.Equal(t, "foo", cluster.ClusterName) - assert.Equal(t, "13.3.x-scala2.12", cluster.SparkVersion) - assert.Equal(t, "i3.xlarge", cluster.NodeTypeId) - assert.Equal(t, 2, cluster.NumWorkers) - assert.Equal(t, "2g", cluster.SparkConf["spark.executor.memory"]) - assert.Equal(t, 2, cluster.Autoscale.MinWorkers) - assert.Equal(t, 7, cluster.Autoscale.MaxWorkers) -} - -func TestClustersOverride(t *testing.T) { - b := loadTarget(t, "./clusters", "development") - assert.Equal(t, "clusters", b.Config.Bundle.Name) - - cluster := b.Config.Resources.Clusters["foo"] - assert.Equal(t, "foo-override", cluster.ClusterName) - assert.Equal(t, "15.2.x-scala2.12", cluster.SparkVersion) - assert.Equal(t, "m5.xlarge", cluster.NodeTypeId) - assert.Equal(t, 3, cluster.NumWorkers) - assert.Equal(t, "4g", cluster.SparkConf["spark.executor.memory"]) - assert.Equal(t, "4g", cluster.SparkConf["spark.executor.memory2"]) - assert.Equal(t, 1, cluster.Autoscale.MinWorkers) - assert.Equal(t, 3, cluster.Autoscale.MaxWorkers) -} diff --git a/bundle/tests/complex_variables_test.go b/bundle/tests/complex_variables_test.go deleted file mode 100644 index d72b5f157..000000000 --- a/bundle/tests/complex_variables_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package config_tests - -import ( - "context" - "testing" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config/mutator" - "github.com/databricks/databricks-sdk-go/service/compute" - "github.com/stretchr/testify/require" -) - -func TestComplexVariables(t *testing.T) { - b, diags := loadTargetWithDiags("variables/complex", "default") - require.Empty(t, diags) - - diags = bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SetVariables(), - mutator.ResolveVariableReferencesInComplexVariables(), - mutator.ResolveVariableReferences( - "variables", - ), - )) - require.NoError(t, diags.Error()) - - require.Equal(t, "13.2.x-scala2.11", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkVersion) - require.Equal(t, "Standard_DS3_v2", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NodeTypeId) - require.Equal(t, "some-policy-id", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.PolicyId) - require.Equal(t, 2, b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NumWorkers) - require.Equal(t, "true", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.speculation"]) - require.Equal(t, "true", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.random"]) - - require.Len(t, b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries, 3) - require.Contains(t, b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries, compute.Library{ - Jar: "/path/to/jar", - }) - 
require.Contains(t, b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries, compute.Library{ - Egg: "/path/to/egg", - }) - require.Contains(t, b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries, compute.Library{ - Whl: "/path/to/whl", - }) - - require.Equal(t, "task with spark version 13.2.x-scala2.11 and jar /path/to/jar", b.Config.Resources.Jobs["my_job"].Tasks[0].TaskKey) -} - -func TestComplexVariablesOverride(t *testing.T) { - b, diags := loadTargetWithDiags("variables/complex", "dev") - require.Empty(t, diags) - - diags = bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SetVariables(), - mutator.ResolveVariableReferencesInComplexVariables(), - mutator.ResolveVariableReferences( - "variables", - ), - )) - require.NoError(t, diags.Error()) - - require.Equal(t, "14.2.x-scala2.11", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkVersion) - require.Equal(t, "Standard_DS3_v3", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NodeTypeId) - require.Equal(t, 4, b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NumWorkers) - require.Equal(t, "false", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.speculation"]) - - // Making sure the variable is overriden and not merged / extended - // These properties are set in the default target but not set in override target - // So they should be empty - require.Equal(t, "", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.random"]) - require.Equal(t, "", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.PolicyId) -} - -func TestComplexVariablesOverrideWithMultipleFiles(t *testing.T) { - b, diags := loadTargetWithDiags("variables/complex_multiple_files", "dev") - require.Empty(t, diags) - - diags = bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SetVariables(), - mutator.ResolveVariableReferencesInComplexVariables(), - mutator.ResolveVariableReferences( - "variables", - ), - )) - require.NoError(t, diags.Error()) - for _, cluster := range b.Config.Resources.Jobs["my_job"].JobClusters { - require.Equalf(t, "14.2.x-scala2.11", cluster.NewCluster.SparkVersion, "cluster: %v", cluster.JobClusterKey) - require.Equalf(t, "Standard_DS3_v2", cluster.NewCluster.NodeTypeId, "cluster: %v", cluster.JobClusterKey) - require.Equalf(t, 4, cluster.NewCluster.NumWorkers, "cluster: %v", cluster.JobClusterKey) - require.Equalf(t, "false", cluster.NewCluster.SparkConf["spark.speculation"], "cluster: %v", cluster.JobClusterKey) - } -} - -func TestComplexVariablesOverrideWithFullSyntax(t *testing.T) { - b, diags := loadTargetWithDiags("variables/complex", "dev") - require.Empty(t, diags) - - diags = bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SetVariables(), - mutator.ResolveVariableReferencesInComplexVariables(), - mutator.ResolveVariableReferences( - "variables", - ), - )) - require.NoError(t, diags.Error()) - require.Empty(t, diags) - - complexvar := b.Config.Variables["complexvar"].Value - require.Equal(t, map[string]any{"key1": "1", "key2": "2", "key3": "3"}, complexvar) -} diff --git a/bundle/tests/loader.go b/bundle/tests/loader.go index 5c48d81cb..bb68b3059 100644 --- a/bundle/tests/loader.go +++ b/bundle/tests/loader.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/libs/dbr" "github.com/databricks/cli/libs/diag" "github.com/databricks/databricks-sdk-go/config" 
"github.com/databricks/databricks-sdk-go/experimental/mocks" @@ -66,7 +67,7 @@ func initializeTarget(t *testing.T, path, env string) (*bundle.Bundle, diag.Diag b := load(t, path) configureMock(t, b) - ctx := context.Background() + ctx := dbr.MockRuntime(context.Background(), false) diags := bundle.Apply(ctx, b, bundle.Seq( mutator.SelectTarget(env), phases.Initialize(), diff --git a/bundle/tests/override_job_cluster_test.go b/bundle/tests/override_job_cluster_test.go deleted file mode 100644 index 1393e03e5..000000000 --- a/bundle/tests/override_job_cluster_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package config_tests - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestOverrideJobClusterDev(t *testing.T) { - b := loadTarget(t, "./override_job_cluster", "development") - assert.Equal(t, "job", b.Config.Resources.Jobs["foo"].Name) - assert.Len(t, b.Config.Resources.Jobs["foo"].JobClusters, 1) - - c := b.Config.Resources.Jobs["foo"].JobClusters[0] - assert.Equal(t, "13.3.x-scala2.12", c.NewCluster.SparkVersion) - assert.Equal(t, "i3.xlarge", c.NewCluster.NodeTypeId) - assert.Equal(t, 1, c.NewCluster.NumWorkers) -} - -func TestOverrideJobClusterStaging(t *testing.T) { - b := loadTarget(t, "./override_job_cluster", "staging") - assert.Equal(t, "job", b.Config.Resources.Jobs["foo"].Name) - assert.Len(t, b.Config.Resources.Jobs["foo"].JobClusters, 1) - - c := b.Config.Resources.Jobs["foo"].JobClusters[0] - assert.Equal(t, "13.3.x-scala2.12", c.NewCluster.SparkVersion) - assert.Equal(t, "i3.2xlarge", c.NewCluster.NodeTypeId) - assert.Equal(t, 4, c.NewCluster.NumWorkers) -} diff --git a/bundle/tests/override_job_tasks_test.go b/bundle/tests/override_job_tasks_test.go deleted file mode 100644 index 85463e17c..000000000 --- a/bundle/tests/override_job_tasks_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package config_tests - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestOverrideTasksDev(t *testing.T) { - b := loadTarget(t, "./override_job_tasks", "development") - assert.Equal(t, "job", b.Config.Resources.Jobs["foo"].Name) - assert.Len(t, b.Config.Resources.Jobs["foo"].Tasks, 2) - - tasks := b.Config.Resources.Jobs["foo"].Tasks - assert.Equal(t, "key1", tasks[0].TaskKey) - assert.Equal(t, "i3.xlarge", tasks[0].NewCluster.NodeTypeId) - assert.Equal(t, 1, tasks[0].NewCluster.NumWorkers) - assert.Equal(t, "./test1.py", tasks[0].SparkPythonTask.PythonFile) - - assert.Equal(t, "key2", tasks[1].TaskKey) - assert.Equal(t, "13.3.x-scala2.12", tasks[1].NewCluster.SparkVersion) - assert.Equal(t, "./test2.py", tasks[1].SparkPythonTask.PythonFile) -} - -func TestOverrideTasksStaging(t *testing.T) { - b := loadTarget(t, "./override_job_tasks", "staging") - assert.Equal(t, "job", b.Config.Resources.Jobs["foo"].Name) - assert.Len(t, b.Config.Resources.Jobs["foo"].Tasks, 2) - - tasks := b.Config.Resources.Jobs["foo"].Tasks - assert.Equal(t, "key1", tasks[0].TaskKey) - assert.Equal(t, "13.3.x-scala2.12", tasks[0].NewCluster.SparkVersion) - assert.Equal(t, "./test1.py", tasks[0].SparkPythonTask.PythonFile) - - assert.Equal(t, "key2", tasks[1].TaskKey) - assert.Equal(t, "i3.2xlarge", tasks[1].NewCluster.NodeTypeId) - assert.Equal(t, 4, tasks[1].NewCluster.NumWorkers) - assert.Equal(t, "./test3.py", tasks[1].SparkPythonTask.PythonFile) -} diff --git a/bundle/tests/override_pipeline_cluster_test.go b/bundle/tests/override_pipeline_cluster_test.go deleted file mode 100644 index 591fe423d..000000000 --- a/bundle/tests/override_pipeline_cluster_test.go +++ /dev/null @@ 
-1,29 +0,0 @@ -package config_tests - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestOverridePipelineClusterDev(t *testing.T) { - b := loadTarget(t, "./override_pipeline_cluster", "development") - assert.Equal(t, "job", b.Config.Resources.Pipelines["foo"].Name) - assert.Len(t, b.Config.Resources.Pipelines["foo"].Clusters, 1) - - c := b.Config.Resources.Pipelines["foo"].Clusters[0] - assert.Equal(t, map[string]string{"foo": "bar"}, c.SparkConf) - assert.Equal(t, "i3.xlarge", c.NodeTypeId) - assert.Equal(t, 1, c.NumWorkers) -} - -func TestOverridePipelineClusterStaging(t *testing.T) { - b := loadTarget(t, "./override_pipeline_cluster", "staging") - assert.Equal(t, "job", b.Config.Resources.Pipelines["foo"].Name) - assert.Len(t, b.Config.Resources.Pipelines["foo"].Clusters, 1) - - c := b.Config.Resources.Pipelines["foo"].Clusters[0] - assert.Equal(t, map[string]string{"foo": "bar"}, c.SparkConf) - assert.Equal(t, "i3.2xlarge", c.NodeTypeId) - assert.Equal(t, 4, c.NumWorkers) -} diff --git a/bundle/tests/run_as_test.go b/bundle/tests/run_as_test.go index 113a6140b..03ff51ec5 100644 --- a/bundle/tests/run_as_test.go +++ b/bundle/tests/run_as_test.go @@ -2,7 +2,6 @@ package config_tests import ( "context" - "fmt" "testing" "github.com/databricks/cli/bundle" @@ -219,7 +218,7 @@ func TestRunAsErrorNeitherUserOrSpSpecified(t *testing.T) { for _, tc := range tcases { t.Run(tc.name, func(t *testing.T) { - bundlePath := fmt.Sprintf("./run_as/not_allowed/neither_sp_nor_user/%s", tc.name) + bundlePath := "./run_as/not_allowed/neither_sp_nor_user/" + tc.name b := load(t, bundlePath) ctx := context.Background() diff --git a/bundle/tests/variables_test.go b/bundle/tests/variables_test.go deleted file mode 100644 index 37d488fad..000000000 --- a/bundle/tests/variables_test.go +++ /dev/null @@ -1,206 +0,0 @@ -package config_tests - -import ( - "context" - "testing" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config/mutator" - "github.com/databricks/databricks-sdk-go/experimental/mocks" - "github.com/databricks/databricks-sdk-go/service/compute" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -func TestVariables(t *testing.T) { - t.Setenv("BUNDLE_VAR_b", "def") - b := load(t, "./variables/vanilla") - diags := bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SetVariables(), - mutator.ResolveVariableReferences( - "variables", - ), - )) - require.NoError(t, diags.Error()) - assert.Equal(t, "abc def", b.Config.Bundle.Name) -} - -func TestVariablesLoadingFailsWhenRequiredVariableIsNotSpecified(t *testing.T) { - b := load(t, "./variables/vanilla") - diags := bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SetVariables(), - mutator.ResolveVariableReferences( - "variables", - ), - )) - assert.ErrorContains(t, diags.Error(), "no value assigned to required variable b. 
Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_b environment variable") -} - -func TestVariablesTargetsBlockOverride(t *testing.T) { - b := load(t, "./variables/env_overrides") - diags := bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SelectTarget("env-with-single-variable-override"), - mutator.SetVariables(), - mutator.ResolveVariableReferences( - "variables", - ), - )) - require.NoError(t, diags.Error()) - assert.Equal(t, "default-a dev-b", b.Config.Workspace.Profile) -} - -func TestVariablesTargetsBlockOverrideForMultipleVariables(t *testing.T) { - b := load(t, "./variables/env_overrides") - diags := bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SelectTarget("env-with-two-variable-overrides"), - mutator.SetVariables(), - mutator.ResolveVariableReferences( - "variables", - ), - )) - require.NoError(t, diags.Error()) - assert.Equal(t, "prod-a prod-b", b.Config.Workspace.Profile) -} - -func TestVariablesTargetsBlockOverrideWithProcessEnvVars(t *testing.T) { - t.Setenv("BUNDLE_VAR_b", "env-var-b") - b := load(t, "./variables/env_overrides") - diags := bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SelectTarget("env-with-two-variable-overrides"), - mutator.SetVariables(), - mutator.ResolveVariableReferences( - "variables", - ), - )) - require.NoError(t, diags.Error()) - assert.Equal(t, "prod-a env-var-b", b.Config.Workspace.Profile) -} - -func TestVariablesTargetsBlockOverrideWithMissingVariables(t *testing.T) { - b := load(t, "./variables/env_overrides") - diags := bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SelectTarget("env-missing-a-required-variable-assignment"), - mutator.SetVariables(), - mutator.ResolveVariableReferences( - "variables", - ), - )) - assert.ErrorContains(t, diags.Error(), "no value assigned to required variable b. 
Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_b environment variable") -} - -func TestVariablesTargetsBlockOverrideWithUndefinedVariables(t *testing.T) { - b := load(t, "./variables/env_overrides") - diags := bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SelectTarget("env-using-an-undefined-variable"), - mutator.SetVariables(), - mutator.ResolveVariableReferences( - "variables", - ), - )) - assert.ErrorContains(t, diags.Error(), "variable c is not defined but is assigned a value") -} - -func TestVariablesWithoutDefinition(t *testing.T) { - t.Setenv("BUNDLE_VAR_a", "foo") - t.Setenv("BUNDLE_VAR_b", "bar") - b := load(t, "./variables/without_definition") - diags := bundle.Apply(context.Background(), b, mutator.SetVariables()) - require.NoError(t, diags.Error()) - require.True(t, b.Config.Variables["a"].HasValue()) - require.True(t, b.Config.Variables["b"].HasValue()) - assert.Equal(t, "foo", b.Config.Variables["a"].Value) - assert.Equal(t, "bar", b.Config.Variables["b"].Value) -} - -func TestVariablesWithTargetLookupOverrides(t *testing.T) { - b := load(t, "./variables/env_overrides") - - mockWorkspaceClient := mocks.NewMockWorkspaceClient(t) - b.SetWorkpaceClient(mockWorkspaceClient.WorkspaceClient) - instancePoolApi := mockWorkspaceClient.GetMockInstancePoolsAPI() - instancePoolApi.EXPECT().GetByInstancePoolName(mock.Anything, "some-test-instance-pool").Return(&compute.InstancePoolAndStats{ - InstancePoolId: "1234", - }, nil) - - clustersApi := mockWorkspaceClient.GetMockClustersAPI() - clustersApi.EXPECT().ListAll(mock.Anything, compute.ListClustersRequest{ - FilterBy: &compute.ListClustersFilterBy{ - ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi}, - }, - }).Return([]compute.ClusterDetails{ - {ClusterId: "4321", ClusterName: "some-test-cluster"}, - {ClusterId: "9876", ClusterName: "some-other-cluster"}, - }, nil) - - clusterPoliciesApi := mockWorkspaceClient.GetMockClusterPoliciesAPI() - clusterPoliciesApi.EXPECT().GetByName(mock.Anything, "some-test-cluster-policy").Return(&compute.Policy{ - PolicyId: "9876", - }, nil) - - diags := bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SelectTarget("env-overrides-lookup"), - mutator.SetVariables(), - mutator.ResolveResourceReferences(), - )) - - require.NoError(t, diags.Error()) - assert.Equal(t, "4321", b.Config.Variables["d"].Value) - assert.Equal(t, "1234", b.Config.Variables["e"].Value) - assert.Equal(t, "9876", b.Config.Variables["f"].Value) -} - -func TestVariableTargetOverrides(t *testing.T) { - tcases := []struct { - targetName string - pipelineName string - pipelineContinuous bool - pipelineNumWorkers int - }{ - { - "use-default-variable-values", - "a_string", - true, - 42, - }, - { - "override-string-variable", - "overridden_string", - true, - 42, - }, - { - "override-int-variable", - "a_string", - true, - 43, - }, - { - "override-both-bool-and-string-variables", - "overridden_string", - false, - 42, - }, - } - - for _, tcase := range tcases { - t.Run(tcase.targetName, func(t *testing.T) { - b := loadTarget(t, "./variables/variable_overrides_in_target", tcase.targetName) - diags := bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SetVariables(), - mutator.ResolveVariableReferences("variables")), - ) - require.NoError(t, diags.Error()) - - assert.Equal(t, tcase.pipelineName, b.Config.Resources.Pipelines["my_pipeline"].Name) - assert.Equal(t, tcase.pipelineContinuous, b.Config.Resources.Pipelines["my_pipeline"].Continuous) - 
assert.Equal(t, tcase.pipelineNumWorkers, b.Config.Resources.Pipelines["my_pipeline"].Clusters[0].NumWorkers) - }) - } -} - -func TestBundleWithEmptyVariableLoads(t *testing.T) { - b := load(t, "./variables/empty") - diags := bundle.Apply(context.Background(), b, mutator.SetVariables()) - require.ErrorContains(t, diags.Error(), "no value assigned to required variable a") -} diff --git a/bundle/trampoline/python_wheel.go b/bundle/trampoline/python_wheel.go index 8e309a625..075804479 100644 --- a/bundle/trampoline/python_wheel.go +++ b/bundle/trampoline/python_wheel.go @@ -2,6 +2,7 @@ package trampoline import ( "context" + "errors" "fmt" "strconv" "strings" @@ -147,7 +148,7 @@ func (t *pythonTrampoline) GetTemplateData(task *jobs.Task) (map[string]any, err func (t *pythonTrampoline) generateParameters(task *jobs.PythonWheelTask) (string, error) { if task.Parameters != nil && task.NamedParameters != nil { - return "", fmt.Errorf("not allowed to pass both paramaters and named_parameters") + return "", errors.New("not allowed to pass both paramaters and named_parameters") } params := append([]string{task.PackageName}, task.Parameters...) for k, v := range task.NamedParameters { diff --git a/bundle/trampoline/trampoline_test.go b/bundle/trampoline/trampoline_test.go index 3c5d18570..6e6b8db48 100644 --- a/bundle/trampoline/trampoline_test.go +++ b/bundle/trampoline/trampoline_test.go @@ -2,7 +2,7 @@ package trampoline import ( "context" - "fmt" + "errors" "os" "path/filepath" "testing" @@ -30,7 +30,7 @@ func (f *functions) GetTasks(b *bundle.Bundle) []TaskWithJobKey { func (f *functions) GetTemplateData(task *jobs.Task) (map[string]any, error) { if task.PythonWheelTask == nil { - return nil, fmt.Errorf("PythonWheelTask cannot be nil") + return nil, errors.New("PythonWheelTask cannot be nil") } data := make(map[string]any) diff --git a/cmd/auth/auth.go b/cmd/auth/auth.go index ceceae25c..4261e93e7 100644 --- a/cmd/auth/auth.go +++ b/cmd/auth/auth.go @@ -2,7 +2,7 @@ package auth import ( "context" - "fmt" + "errors" "github.com/databricks/cli/libs/auth" "github.com/databricks/cli/libs/cmdio" @@ -36,7 +36,7 @@ GCP: https://docs.gcp.databricks.com/dev-tools/auth/index.html`, func promptForHost(ctx context.Context) (string, error) { if !cmdio.IsInTTY(ctx) { - return "", fmt.Errorf("the command is being run in a non-interactive environment, please specify a host using --host") + return "", errors.New("the command is being run in a non-interactive environment, please specify a host using --host") } prompt := cmdio.Prompt(ctx) @@ -46,7 +46,7 @@ func promptForHost(ctx context.Context) (string, error) { func promptForAccountID(ctx context.Context) (string, error) { if !cmdio.IsInTTY(ctx) { - return "", fmt.Errorf("the command is being run in a non-interactive environment, please specify an account ID using --account-id") + return "", errors.New("the command is being run in a non-interactive environment, please specify an account ID using --account-id") } prompt := cmdio.Prompt(ctx) diff --git a/cmd/auth/describe_test.go b/cmd/auth/describe_test.go index 7f5f900d4..35e0c6e64 100644 --- a/cmd/auth/describe_test.go +++ b/cmd/auth/describe_test.go @@ -2,7 +2,7 @@ package auth import ( "context" - "fmt" + "errors" "testing" "github.com/databricks/cli/cmd/root" @@ -102,7 +102,7 @@ func TestGetWorkspaceAuthStatusError(t *testing.T) { "token": "test-token", "auth_type": "azure-cli", }) - return cfg, false, fmt.Errorf("auth error") + return cfg, false, errors.New("auth error") }) require.NoError(t, err) 
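// --- editor's note: illustrative sketch, not part of the patch above ---
// The hunks in this region repeatedly swap fmt.Errorf for errors.New when the
// error message is a constant string. A minimal standalone example of that
// rewrite follows; the package and function names here are hypothetical.
package example

import (
	"errors"
	"fmt"
)

func validateName(name string) error {
	if name == "" {
		// was: fmt.Errorf("name must not be empty") — no formatting, so errors.New suffices
		return errors.New("name must not be empty")
	}
	if len(name) > 64 {
		// fmt.Errorf stays wherever formatting (or %w wrapping) is actually needed
		return fmt.Errorf("name %q is too long (%d characters)", name, len(name))
	}
	return nil
}
// --- end editor's note ---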
require.NotNil(t, status) @@ -151,7 +151,7 @@ func TestGetWorkspaceAuthStatusSensitive(t *testing.T) { "token": "test-token", "auth_type": "azure-cli", }) - return cfg, false, fmt.Errorf("auth error") + return cfg, false, errors.New("auth error") }) require.NoError(t, err) require.NotNil(t, status) diff --git a/cmd/auth/env.go b/cmd/auth/env.go index 52b7cbbfd..11149af8c 100644 --- a/cmd/auth/env.go +++ b/cmd/auth/env.go @@ -23,9 +23,9 @@ func canonicalHost(host string) (string, error) { } // If the host is empty, assume the scheme wasn't included. if parsedHost.Host == "" { - return fmt.Sprintf("https://%s", host), nil + return "https://" + host, nil } - return fmt.Sprintf("https://%s", parsedHost.Host), nil + return "https://" + parsedHost.Host, nil } var ErrNoMatchingProfiles = errors.New("no matching profiles found") diff --git a/cmd/auth/login.go b/cmd/auth/login.go index c98676599..a6d0bf4cc 100644 --- a/cmd/auth/login.go +++ b/cmd/auth/login.go @@ -176,7 +176,7 @@ depends on the existing profiles you have set in your configuration file func setHostAndAccountId(ctx context.Context, profileName string, persistentAuth *auth.PersistentAuth, args []string) error { // If both [HOST] and --host are provided, return an error. if len(args) > 0 && persistentAuth.Host != "" { - return fmt.Errorf("please only provide a host as an argument or a flag, not both") + return errors.New("please only provide a host as an argument or a flag, not both") } profiler := profile.GetProfiler(ctx) diff --git a/cmd/bundle/destroy.go b/cmd/bundle/destroy.go index 711abbcd7..0b2f14875 100644 --- a/cmd/bundle/destroy.go +++ b/cmd/bundle/destroy.go @@ -2,7 +2,7 @@ package bundle import ( "context" - "fmt" + "errors" "os" "github.com/databricks/cli/bundle" @@ -49,16 +49,16 @@ func newDestroyCommand() *cobra.Command { // we require auto-approve for non tty terminals since interactive consent // is not possible if !term.IsTerminal(int(os.Stderr.Fd())) && !autoApprove { - return fmt.Errorf("please specify --auto-approve to skip interactive confirmation checks for non tty consoles") + return errors.New("please specify --auto-approve to skip interactive confirmation checks for non tty consoles") } // Check auto-approve is selected for json logging logger, ok := cmdio.FromContext(ctx) if !ok { - return fmt.Errorf("progress logger not found") + return errors.New("progress logger not found") } if logger.Mode == flags.ModeJson && !autoApprove { - return fmt.Errorf("please specify --auto-approve since selected logging format is json") + return errors.New("please specify --auto-approve since selected logging format is json") } diags = bundle.Apply(ctx, b, bundle.Seq( diff --git a/cmd/bundle/generate/dashboard.go b/cmd/bundle/generate/dashboard.go index f196bbe62..fa3c91b2a 100644 --- a/cmd/bundle/generate/dashboard.go +++ b/cmd/bundle/generate/dashboard.go @@ -96,7 +96,7 @@ func (d *dashboard) resolveFromPath(ctx context.Context, b *bundle.Bundle) (stri return "", diag.Diagnostics{ { Severity: diag.Error, - Summary: fmt.Sprintf("expected a dashboard, found a %s", found), + Summary: "expected a dashboard, found a " + found, }, } } @@ -188,7 +188,7 @@ func (d *dashboard) saveSerializedDashboard(_ context.Context, b *bundle.Bundle, func (d *dashboard) saveConfiguration(ctx context.Context, b *bundle.Bundle, dashboard *dashboards.Dashboard, key string) error { // Save serialized dashboard definition to the dashboard directory. 
- dashboardBasename := fmt.Sprintf("%s.lvdash.json", key) + dashboardBasename := key + ".lvdash.json" dashboardPath := filepath.Join(d.dashboardDir, dashboardBasename) err := d.saveSerializedDashboard(ctx, b, dashboard, dashboardPath) if err != nil { @@ -215,7 +215,7 @@ func (d *dashboard) saveConfiguration(ctx context.Context, b *bundle.Bundle, das } // Save the configuration to the resource directory. - resourcePath := filepath.Join(d.resourceDir, fmt.Sprintf("%s.dashboard.yml", key)) + resourcePath := filepath.Join(d.resourceDir, key+".dashboard.yml") saver := yamlsaver.NewSaverWithStyle(map[string]yaml.Style{ "display_name": yaml.DoubleQuotedStyle, }) diff --git a/cmd/bundle/generate/job.go b/cmd/bundle/generate/job.go index 9ac41e3cb..827d270e5 100644 --- a/cmd/bundle/generate/job.go +++ b/cmd/bundle/generate/job.go @@ -85,8 +85,8 @@ func NewGenerateJobCommand() *cobra.Command { return err } - oldFilename := filepath.Join(configDir, fmt.Sprintf("%s.yml", jobKey)) - filename := filepath.Join(configDir, fmt.Sprintf("%s.job.yml", jobKey)) + oldFilename := filepath.Join(configDir, jobKey+".yml") + filename := filepath.Join(configDir, jobKey+".job.yml") // User might continuously run generate command to update their bundle jobs with any changes made in Databricks UI. // Due to changing in the generated file names, we need to first rename existing resource file to the new name. @@ -107,7 +107,7 @@ func NewGenerateJobCommand() *cobra.Command { return err } - cmdio.LogString(ctx, fmt.Sprintf("Job configuration successfully saved to %s", filename)) + cmdio.LogString(ctx, "Job configuration successfully saved to "+filename) return nil } diff --git a/cmd/bundle/generate/pipeline.go b/cmd/bundle/generate/pipeline.go index 910baa45f..863b0b2f7 100644 --- a/cmd/bundle/generate/pipeline.go +++ b/cmd/bundle/generate/pipeline.go @@ -85,8 +85,8 @@ func NewGeneratePipelineCommand() *cobra.Command { return err } - oldFilename := filepath.Join(configDir, fmt.Sprintf("%s.yml", pipelineKey)) - filename := filepath.Join(configDir, fmt.Sprintf("%s.pipeline.yml", pipelineKey)) + oldFilename := filepath.Join(configDir, pipelineKey+".yml") + filename := filepath.Join(configDir, pipelineKey+".pipeline.yml") // User might continuously run generate command to update their bundle jobs with any changes made in Databricks UI. // Due to changing in the generated file names, we need to first rename existing resource file to the new name. 
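// --- editor's note: illustrative sketch, not part of the patch above ---
// The generate-command hunks above replace fmt.Sprintf("%s.job.yml", key) style
// formatting with plain string concatenation before calling filepath.Join.
// The helper below is hypothetical and only shows the shape of that rewrite.
package example

import "path/filepath"

func resourceConfigPath(configDir, key string) string {
	// was: filepath.Join(configDir, fmt.Sprintf("%s.job.yml", key))
	return filepath.Join(configDir, key+".job.yml")
}
// --- end editor's note ---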
@@ -109,7 +109,7 @@ func NewGeneratePipelineCommand() *cobra.Command { return err } - cmdio.LogString(ctx, fmt.Sprintf("Pipeline configuration successfully saved to %s", filename)) + cmdio.LogString(ctx, "Pipeline configuration successfully saved to "+filename) return nil } diff --git a/cmd/bundle/generate/utils.go b/cmd/bundle/generate/utils.go index 8e3764e35..dbfad9438 100644 --- a/cmd/bundle/generate/utils.go +++ b/cmd/bundle/generate/utils.go @@ -126,7 +126,7 @@ func (n *downloader) FlushToDisk(ctx context.Context, force bool) error { return err } - cmdio.LogString(errCtx, fmt.Sprintf("File successfully saved to %s", targetPath)) + cmdio.LogString(errCtx, "File successfully saved to "+targetPath) return reader.Close() }) } diff --git a/cmd/bundle/launch.go b/cmd/bundle/launch.go index 0d2b4233b..3fea839c9 100644 --- a/cmd/bundle/launch.go +++ b/cmd/bundle/launch.go @@ -1,7 +1,7 @@ package bundle import ( - "fmt" + "errors" "github.com/databricks/cli/cmd/root" "github.com/spf13/cobra" @@ -19,7 +19,7 @@ func newLaunchCommand() *cobra.Command { } cmd.RunE = func(cmd *cobra.Command, args []string) error { - return fmt.Errorf("TODO") + return errors.New("TODO") // contents, err := os.ReadFile(args[0]) // if err != nil { // return err diff --git a/cmd/bundle/open.go b/cmd/bundle/open.go index a2ad32fd8..5a26e1ea7 100644 --- a/cmd/bundle/open.go +++ b/cmd/bundle/open.go @@ -44,7 +44,7 @@ func resolveOpenArgument(ctx context.Context, b *bundle.Bundle, args []string) ( } if len(args) < 1 { - return "", fmt.Errorf("expected a KEY of the resource to open") + return "", errors.New("expected a KEY of the resource to open") } return args[0], nil @@ -113,7 +113,7 @@ func newOpenCommand() *cobra.Command { // Confirm that the resource has a URL. url := ref.Resource.GetURL() if url == "" { - return fmt.Errorf("resource does not have a URL associated with it (has it been deployed?)") + return errors.New("resource does not have a URL associated with it (has it been deployed?)") } return browser.OpenURL(url) diff --git a/cmd/bundle/run.go b/cmd/bundle/run.go index 3bcebddd5..df35d7222 100644 --- a/cmd/bundle/run.go +++ b/cmd/bundle/run.go @@ -3,6 +3,7 @@ package bundle import ( "context" "encoding/json" + "errors" "fmt" "github.com/databricks/cli/bundle" @@ -48,7 +49,7 @@ func resolveRunArgument(ctx context.Context, b *bundle.Bundle, args []string) (s } if len(args) < 1 { - return "", nil, fmt.Errorf("expected a KEY of the resource to run") + return "", nil, errors.New("expected a KEY of the resource to run") } return args[0], args[1:], nil diff --git a/cmd/bundle/test.go b/cmd/bundle/test.go index 4d30e727d..794575220 100644 --- a/cmd/bundle/test.go +++ b/cmd/bundle/test.go @@ -1,7 +1,7 @@ package bundle import ( - "fmt" + "errors" "github.com/spf13/cobra" ) @@ -17,7 +17,7 @@ func newTestCommand() *cobra.Command { } cmd.RunE = func(cmd *cobra.Command, args []string) error { - return fmt.Errorf("TODO") + return errors.New("TODO") // results := project.RunPythonOnDev(cmd.Context(), `return 1`) // if results.Failed() { // return results.Err() diff --git a/cmd/bundle/validate.go b/cmd/bundle/validate.go index 3b50cc258..daeb7426d 100644 --- a/cmd/bundle/validate.go +++ b/cmd/bundle/validate.go @@ -2,6 +2,7 @@ package bundle import ( "encoding/json" + "errors" "fmt" "github.com/databricks/cli/bundle" @@ -39,7 +40,7 @@ func newValidateCommand() *cobra.Command { if err := diags.Error(); err != nil { return diags.Error() } else { - return fmt.Errorf("invariant failed: returned bundle is nil") + return 
errors.New("invariant failed: returned bundle is nil") } } diff --git a/cmd/configure/configure.go b/cmd/configure/configure.go index 895a5902c..4a6568d06 100644 --- a/cmd/configure/configure.go +++ b/cmd/configure/configure.go @@ -1,6 +1,7 @@ package configure import ( + "errors" "fmt" "github.com/databricks/cli/libs/cmdio" @@ -62,12 +63,12 @@ func configureInteractive(cmd *cobra.Command, flags *configureFlags, cfg *config func configureNonInteractive(cmd *cobra.Command, flags *configureFlags, cfg *config.Config) error { if cfg.Host == "" { - return fmt.Errorf("host must be set in non-interactive mode") + return errors.New("host must be set in non-interactive mode") } // Check presence of cluster ID before reading token to fail fast. if flags.ConfigureCluster && cfg.ClusterID == "" { - return fmt.Errorf("cluster ID must be set in non-interactive mode") + return errors.New("cluster ID must be set in non-interactive mode") } // Read token from stdin if not already set. diff --git a/cmd/configure/host.go b/cmd/configure/host.go index 781c12387..0a454c6d1 100644 --- a/cmd/configure/host.go +++ b/cmd/configure/host.go @@ -1,7 +1,7 @@ package configure import ( - "fmt" + "errors" "net/url" ) @@ -11,10 +11,10 @@ func validateHost(s string) error { return err } if u.Host == "" || u.Scheme != "https" { - return fmt.Errorf("must start with https://") + return errors.New("must start with https://") } if u.Path != "" && u.Path != "/" { - return fmt.Errorf("must use empty path") + return errors.New("must use empty path") } return nil } diff --git a/cmd/labs/github/repositories.go b/cmd/labs/github/repositories.go index 850cdb1cb..afdf7aeb2 100644 --- a/cmd/labs/github/repositories.go +++ b/cmd/labs/github/repositories.go @@ -12,7 +12,7 @@ import ( const repositoryCacheTTL = 24 * time.Hour func NewRepositoryCache(org, cacheDir string) *repositoryCache { - filename := fmt.Sprintf("%s-repositories", org) + filename := org + "-repositories" return &repositoryCache{ cache: localcache.NewLocalCache[Repositories](cacheDir, filename, repositoryCacheTTL), Org: org, diff --git a/cmd/labs/installed.go b/cmd/labs/installed.go index e4249c9ff..9982cc1f0 100644 --- a/cmd/labs/installed.go +++ b/cmd/labs/installed.go @@ -1,6 +1,7 @@ package labs import ( + "errors" "fmt" "github.com/databricks/cli/cmd/labs/project" @@ -49,7 +50,7 @@ func newInstalledCommand() *cobra.Command { }) } if len(info.Projects) == 0 { - return fmt.Errorf("no projects installed") + return errors.New("no projects installed") } return cmdio.Render(ctx, info) }, diff --git a/cmd/labs/localcache/jsonfile.go b/cmd/labs/localcache/jsonfile.go index 6540e4ac2..50ed372f5 100644 --- a/cmd/labs/localcache/jsonfile.go +++ b/cmd/labs/localcache/jsonfile.go @@ -93,7 +93,7 @@ func (r *LocalCache[T]) writeCache(ctx context.Context, data T) (T, error) { } func (r *LocalCache[T]) FileName() string { - return filepath.Join(r.dir, fmt.Sprintf("%s.json", r.name)) + return filepath.Join(r.dir, r.name+".json") } func (r *LocalCache[T]) loadCache() (*cached[T], error) { diff --git a/cmd/labs/localcache/jsonfile_test.go b/cmd/labs/localcache/jsonfile_test.go index 9d42c6179..8172b7d14 100644 --- a/cmd/labs/localcache/jsonfile_test.go +++ b/cmd/labs/localcache/jsonfile_test.go @@ -3,7 +3,6 @@ package localcache import ( "context" "errors" - "fmt" "net/url" "runtime" "testing" @@ -115,7 +114,7 @@ func TestFolderDisappears(t *testing.T) { func TestRefreshFails(t *testing.T) { c := NewLocalCache[int64](t.TempDir(), "time", 1*time.Minute) tick := func() (int64, error) { 
- return 0, fmt.Errorf("nope") + return 0, errors.New("nope") } ctx := context.Background() _, err := c.Load(ctx, tick) diff --git a/cmd/labs/project/installer.go b/cmd/labs/project/installer.go index 041415964..7d31623bb 100644 --- a/cmd/labs/project/installer.go +++ b/cmd/labs/project/installer.go @@ -175,7 +175,7 @@ func (i *installer) login(ctx context.Context) (*databricks.WorkspaceClient, err return nil, fmt.Errorf("valid: %w", err) } if !i.HasAccountLevelCommands() && cfg.IsAccountClient() { - return nil, fmt.Errorf("got account-level client, but no account-level commands") + return nil, errors.New("got account-level client, but no account-level commands") } lc := &loginConfig{Entrypoint: i.Installer.Entrypoint} w, err := lc.askWorkspace(ctx, cfg) @@ -200,10 +200,10 @@ func (i *installer) downloadLibrary(ctx context.Context) error { libTarget := i.LibDir() // we may support wheels, jars, and golang binaries. but those are not zipballs if i.IsZipball() { - feedback <- fmt.Sprintf("Downloading and unpacking zipball for %s", i.version) + feedback <- "Downloading and unpacking zipball for " + i.version return i.downloadAndUnpackZipball(ctx, libTarget) } - return fmt.Errorf("we only support zipballs for now") + return errors.New("we only support zipballs for now") } func (i *installer) downloadAndUnpackZipball(ctx context.Context, libTarget string) error { @@ -234,7 +234,7 @@ func (i *installer) setupPythonVirtualEnvironment(ctx context.Context, w *databr log.Debugf(ctx, "Detected Python %s at: %s", py.Version, py.Path) venvPath := i.virtualEnvPath(ctx) log.Debugf(ctx, "Creating Python Virtual Environment at: %s", venvPath) - feedback <- fmt.Sprintf("Creating Virtual Environment with Python %s", py.Version) + feedback <- "Creating Virtual Environment with Python " + py.Version _, err = process.Background(ctx, []string{py.Path, "-m", "venv", venvPath}) if err != nil { return fmt.Errorf("create venv: %w", err) @@ -251,8 +251,8 @@ func (i *installer) setupPythonVirtualEnvironment(ctx context.Context, w *databr if !ok { return fmt.Errorf("unsupported runtime: %s", cluster.SparkVersion) } - feedback <- fmt.Sprintf("Installing Databricks Connect v%s", runtimeVersion) - pipSpec := fmt.Sprintf("databricks-connect==%s", runtimeVersion) + feedback <- "Installing Databricks Connect v" + runtimeVersion + pipSpec := "databricks-connect==" + runtimeVersion err = i.installPythonDependencies(ctx, pipSpec) if err != nil { return fmt.Errorf("dbconnect: %w", err) diff --git a/cmd/labs/show.go b/cmd/labs/show.go index c36f0bda3..e8c876d8b 100644 --- a/cmd/labs/show.go +++ b/cmd/labs/show.go @@ -1,7 +1,7 @@ package labs import ( - "fmt" + "errors" "github.com/databricks/cli/cmd/labs/project" "github.com/databricks/cli/cmd/root" @@ -34,7 +34,7 @@ func newShowCommand() *cobra.Command { return err } if len(installed) == 0 { - return fmt.Errorf("no projects found") + return errors.New("no projects found") } name := args[0] for _, v := range installed { diff --git a/cmd/root/auth.go b/cmd/root/auth.go index 07ab48399..49abfd414 100644 --- a/cmd/root/auth.go +++ b/cmd/root/auth.go @@ -26,7 +26,7 @@ type ErrNoWorkspaceProfiles struct { } func (e ErrNoWorkspaceProfiles) Error() string { - return fmt.Sprintf("%s does not contain workspace profiles; please create one by running 'databricks configure'", e.path) + return e.path + " does not contain workspace profiles; please create one by running 'databricks configure'" } type ErrNoAccountProfiles struct { @@ -34,7 +34,7 @@ type ErrNoAccountProfiles struct { } func (e 
ErrNoAccountProfiles) Error() string { - return fmt.Sprintf("%s does not contain account profiles", e.path) + return e.path + " does not contain account profiles" } func initProfileFlag(cmd *cobra.Command) { @@ -253,7 +253,7 @@ func AskForWorkspaceProfile(ctx context.Context) (string, error) { return profiles[0].Name, nil } i, _, err := cmdio.RunSelect(ctx, &promptui.Select{ - Label: fmt.Sprintf("Workspace profiles defined in %s", path), + Label: "Workspace profiles defined in " + path, Items: profiles, Searcher: profiles.SearchCaseInsensitive, StartInSearchMode: true, @@ -287,7 +287,7 @@ func AskForAccountProfile(ctx context.Context) (string, error) { return profiles[0].Name, nil } i, _, err := cmdio.RunSelect(ctx, &promptui.Select{ - Label: fmt.Sprintf("Account profiles defined in %s", path), + Label: "Account profiles defined in " + path, Items: profiles, Searcher: profiles.SearchCaseInsensitive, StartInSearchMode: true, diff --git a/cmd/root/progress_logger.go b/cmd/root/progress_logger.go index 1458de13a..0cc49b2ac 100644 --- a/cmd/root/progress_logger.go +++ b/cmd/root/progress_logger.go @@ -2,7 +2,7 @@ package root import ( "context" - "fmt" + "errors" "os" "github.com/databricks/cli/libs/cmdio" @@ -37,7 +37,7 @@ func (f *progressLoggerFlag) initializeContext(ctx context.Context) (context.Con if f.log.level.String() != "disabled" && f.log.file.String() == "stderr" && f.ProgressLogFormat == flags.ModeInplace { - return nil, fmt.Errorf("inplace progress logging cannot be used when log-file is stderr") + return nil, errors.New("inplace progress logging cannot be used when log-file is stderr") } format := f.ProgressLogFormat diff --git a/cmd/sync/completion.go b/cmd/sync/completion.go index 422147713..5a65dd528 100644 --- a/cmd/sync/completion.go +++ b/cmd/sync/completion.go @@ -2,7 +2,6 @@ package sync import ( "context" - "fmt" "path" "strings" @@ -52,8 +51,8 @@ func completeRemotePath( } prefixes := []string{ - path.Clean(fmt.Sprintf("/Users/%s", me.UserName)) + "/", - path.Clean(fmt.Sprintf("/Repos/%s", me.UserName)) + "/", + path.Clean("/Users/"+me.UserName) + "/", + path.Clean("/Repos/"+me.UserName) + "/", } validPrefix := false diff --git a/cmd/sync/sync.go b/cmd/sync/sync.go index cd2167a19..dea40f96a 100644 --- a/cmd/sync/sync.go +++ b/cmd/sync/sync.go @@ -2,6 +2,7 @@ package sync import ( "context" + "errors" "flag" "fmt" "io" @@ -29,7 +30,7 @@ type syncFlags struct { func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, args []string, b *bundle.Bundle) (*sync.SyncOptions, error) { if len(args) > 0 { - return nil, fmt.Errorf("SRC and DST are not configurable in the context of a bundle") + return nil, errors.New("SRC and DST are not configurable in the context of a bundle") } opts, err := files.GetSyncOptions(cmd.Context(), bundle.ReadOnly(b)) diff --git a/cmd/workspace/repos/overrides.go b/cmd/workspace/repos/overrides.go index aad38ecc7..561921623 100644 --- a/cmd/workspace/repos/overrides.go +++ b/cmd/workspace/repos/overrides.go @@ -2,6 +2,7 @@ package repos import ( "context" + "errors" "fmt" "strconv" @@ -153,7 +154,7 @@ func repoArgumentToRepoID(ctx context.Context, w *databricks.WorkspaceClient, ar args = append(args, id) } if len(args) != 1 { - return 0, fmt.Errorf("expected to have the id for the corresponding repo to access") + return 0, errors.New("expected to have the id for the corresponding repo to access") } // ---- End copy from cmd/workspace/repos/repos.go ---- diff --git a/cmd/workspace/secrets/put_secret.go b/cmd/workspace/secrets/put_secret.go index 
f24814f05..b446524f7 100644 --- a/cmd/workspace/secrets/put_secret.go +++ b/cmd/workspace/secrets/put_secret.go @@ -2,7 +2,7 @@ package secrets import ( "encoding/base64" - "fmt" + "errors" "io" "os" @@ -67,7 +67,7 @@ func newPutSecret() *cobra.Command { bytesValueChanged := cmd.Flags().Changed("bytes-value") stringValueChanged := cmd.Flags().Changed("string-value") if bytesValueChanged && stringValueChanged { - return fmt.Errorf("cannot specify both --bytes-value and --string-value") + return errors.New("cannot specify both --bytes-value and --string-value") } if cmd.Flags().Changed("json") { diff --git a/cmd/workspace/workspace/overrides.go b/cmd/workspace/workspace/overrides.go index 216e9b5d8..53438a764 100644 --- a/cmd/workspace/workspace/overrides.go +++ b/cmd/workspace/workspace/overrides.go @@ -36,7 +36,7 @@ func exportOverride(exportCmd *cobra.Command, exportReq *workspace.ExportRequest ctx := cmd.Context() w := root.WorkspaceClient(ctx) if len(args) != 1 { - return fmt.Errorf("expected to have the absolute path of the object or directory") + return errors.New("expected to have the absolute path of the object or directory") } exportReq.Path = args[0] diff --git a/integration/assumptions/main_test.go b/integration/assumptions/main_test.go deleted file mode 100644 index be2761385..000000000 --- a/integration/assumptions/main_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package assumptions_test - -import ( - "testing" - - "github.com/databricks/cli/integration/internal" -) - -// TestMain is the entrypoint executed by the test runner. -// See [internal.Main] for prerequisites for running integration tests. -func TestMain(m *testing.M) { - internal.Main(m) -} diff --git a/integration/bundle/bind_resource_test.go b/integration/bundle/bind_resource_test.go index 508aa3410..ba10190aa 100644 --- a/integration/bundle/bind_resource_test.go +++ b/integration/bundle/bind_resource_test.go @@ -1,9 +1,9 @@ package bundle_test import ( - "fmt" "os" "path/filepath" + "strconv" "testing" "github.com/databricks/cli/integration/internal/acc" @@ -35,7 +35,7 @@ func TestBindJobToExistingJob(t *testing.T) { }) ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) - c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId), "--auto-approve") + c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "foo", strconv.FormatInt(jobId, 10), "--auto-approve") _, _, err := c.Run() require.NoError(t, err) @@ -53,7 +53,7 @@ func TestBindJobToExistingJob(t *testing.T) { JobId: jobId, }) require.NoError(t, err) - require.Equal(t, job.Settings.Name, fmt.Sprintf("test-job-basic-%s", uniqueId)) + require.Equal(t, job.Settings.Name, "test-job-basic-"+uniqueId) require.Contains(t, job.Settings.Tasks[0].SparkPythonTask.PythonFile, "hello_world.py") c = testcli.NewRunner(t, ctx, "bundle", "deployment", "unbind", "foo") @@ -71,7 +71,7 @@ func TestBindJobToExistingJob(t *testing.T) { JobId: jobId, }) require.NoError(t, err) - require.Equal(t, job.Settings.Name, fmt.Sprintf("test-job-basic-%s", uniqueId)) + require.Equal(t, job.Settings.Name, "test-job-basic-"+uniqueId) require.Contains(t, job.Settings.Tasks[0].SparkPythonTask.PythonFile, "hello_world.py") } @@ -96,7 +96,7 @@ func TestAbortBind(t *testing.T) { // Bind should fail because prompting is not possible. 
ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) ctx = env.Set(ctx, "TERM", "dumb") - c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId)) + c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "foo", strconv.FormatInt(jobId, 10)) // Expect error suggesting to use --auto-approve _, _, err := c.Run() @@ -114,7 +114,7 @@ func TestAbortBind(t *testing.T) { }) require.NoError(t, err) - require.NotEqual(t, job.Settings.Name, fmt.Sprintf("test-job-basic-%s", uniqueId)) + require.NotEqual(t, job.Settings.Name, "test-job-basic-"+uniqueId) require.Contains(t, job.Settings.Tasks[0].NotebookTask.NotebookPath, "test") } @@ -143,7 +143,7 @@ func TestGenerateAndBind(t *testing.T) { ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) c := testcli.NewRunner(t, ctx, "bundle", "generate", "job", "--key", "test_job_key", - "--existing-job-id", fmt.Sprint(jobId), + "--existing-job-id", strconv.FormatInt(jobId, 10), "--config-dir", filepath.Join(bundleRoot, "resources"), "--source-dir", filepath.Join(bundleRoot, "src")) _, _, err = c.Run() @@ -157,7 +157,7 @@ func TestGenerateAndBind(t *testing.T) { require.Len(t, matches, 1) - c = testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "test_job_key", fmt.Sprint(jobId), "--auto-approve") + c = testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "test_job_key", strconv.FormatInt(jobId, 10), "--auto-approve") _, _, err = c.Run() require.NoError(t, err) diff --git a/integration/bundle/clusters_test.go b/integration/bundle/clusters_test.go index 449206208..b94b8365e 100644 --- a/integration/bundle/clusters_test.go +++ b/integration/bundle/clusters_test.go @@ -1,7 +1,6 @@ package bundle_test import ( - "fmt" "testing" "github.com/databricks/cli/integration/internal/acc" @@ -29,7 +28,7 @@ func TestDeployBundleWithCluster(t *testing.T) { t.Cleanup(func() { destroyBundle(t, ctx, root) - cluster, err := wt.W.Clusters.GetByClusterName(ctx, fmt.Sprintf("test-cluster-%s", uniqueId)) + cluster, err := wt.W.Clusters.GetByClusterName(ctx, "test-cluster-"+uniqueId) if err != nil { require.ErrorContains(t, err, "does not exist") } else { @@ -40,7 +39,7 @@ func TestDeployBundleWithCluster(t *testing.T) { deployBundle(t, ctx, root) // Cluster should exists after bundle deployment - cluster, err := wt.W.Clusters.GetByClusterName(ctx, fmt.Sprintf("test-cluster-%s", uniqueId)) + cluster, err := wt.W.Clusters.GetByClusterName(ctx, "test-cluster-"+uniqueId) require.NoError(t, err) require.NotNil(t, cluster) diff --git a/integration/bundle/dashboards_test.go b/integration/bundle/dashboards_test.go index 83b4b8b03..a96b657f6 100644 --- a/integration/bundle/dashboards_test.go +++ b/integration/bundle/dashboards_test.go @@ -40,7 +40,7 @@ func TestDashboards(t *testing.T) { // Load the dashboard by its ID and confirm its display name. dashboard, err := wt.W.Lakeview.GetByDashboardId(ctx, oi.ResourceId) require.NoError(t, err) - assert.Equal(t, fmt.Sprintf("test-dashboard-%s", uniqueID), dashboard.DisplayName) + assert.Equal(t, "test-dashboard-"+uniqueID, dashboard.DisplayName) // Make an out of band modification to the dashboard and confirm that it is detected. 
_, err = wt.W.Lakeview.Update(ctx, dashboards.UpdateDashboardRequest{ diff --git a/integration/bundle/deploy_to_shared_test.go b/integration/bundle/deploy_to_shared_test.go index b4395f4c6..387d3c67a 100644 --- a/integration/bundle/deploy_to_shared_test.go +++ b/integration/bundle/deploy_to_shared_test.go @@ -1,7 +1,6 @@ package bundle_test import ( - "fmt" "testing" "github.com/databricks/cli/integration/internal/acc" @@ -23,7 +22,7 @@ func TestDeployBasicToSharedWorkspacePath(t *testing.T) { "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, - "root_path": fmt.Sprintf("/Shared/%s", currentUser.UserName), + "root_path": "/Shared/" + currentUser.UserName, }) t.Cleanup(func() { diff --git a/integration/bundle/empty_bundle_test.go b/integration/bundle/empty_bundle_test.go index 1ab240d13..2c650cbef 100644 --- a/integration/bundle/empty_bundle_test.go +++ b/integration/bundle/empty_bundle_test.go @@ -1,7 +1,6 @@ package bundle_test import ( - "fmt" "os" "path/filepath" "testing" @@ -19,8 +18,7 @@ func TestEmptyBundleDeploy(t *testing.T) { f, err := os.Create(filepath.Join(tmpDir, "databricks.yml")) require.NoError(t, err) - bundleRoot := fmt.Sprintf(`bundle: - name: %s`, uuid.New().String()) + bundleRoot := "bundle:\n name: " + uuid.New().String() _, err = f.WriteString(bundleRoot) require.NoError(t, err) f.Close() diff --git a/integration/bundle/generate_job_test.go b/integration/bundle/generate_job_test.go index b68bb7d61..f3c4c7829 100644 --- a/integration/bundle/generate_job_test.go +++ b/integration/bundle/generate_job_test.go @@ -2,10 +2,10 @@ package bundle_test import ( "context" - "fmt" "os" "path" "path/filepath" + "strconv" "strings" "testing" @@ -37,7 +37,7 @@ func TestGenerateFromExistingJobAndDeploy(t *testing.T) { ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) c := testcli.NewRunner(t, ctx, "bundle", "generate", "job", - "--existing-job-id", fmt.Sprint(jobId), + "--existing-job-id", strconv.FormatInt(jobId, 10), "--config-dir", filepath.Join(bundleRoot, "resources"), "--source-dir", filepath.Join(bundleRoot, "src")) _, _, err := c.Run() @@ -55,7 +55,7 @@ func TestGenerateFromExistingJobAndDeploy(t *testing.T) { require.NoError(t, err) generatedYaml := string(data) require.Contains(t, generatedYaml, "notebook_task:") - require.Contains(t, generatedYaml, fmt.Sprintf("notebook_path: %s", filepath.Join("..", "src", "test.py"))) + require.Contains(t, generatedYaml, "notebook_path: "+filepath.Join("..", "src", "test.py")) require.Contains(t, generatedYaml, "task_key: test") require.Contains(t, generatedYaml, "new_cluster:") require.Contains(t, generatedYaml, "spark_version: 13.3.x-scala2.12") diff --git a/integration/bundle/generate_pipeline_test.go b/integration/bundle/generate_pipeline_test.go index 7843ec0c3..3565ab928 100644 --- a/integration/bundle/generate_pipeline_test.go +++ b/integration/bundle/generate_pipeline_test.go @@ -2,7 +2,6 @@ package bundle_test import ( "context" - "fmt" "os" "path" "path/filepath" @@ -36,7 +35,7 @@ func TestGenerateFromExistingPipelineAndDeploy(t *testing.T) { ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) c := testcli.NewRunner(t, ctx, "bundle", "generate", "pipeline", - "--existing-pipeline-id", fmt.Sprint(pipelineId), + "--existing-pipeline-id", pipelineId, "--config-dir", filepath.Join(bundleRoot, "resources"), "--source-dir", filepath.Join(bundleRoot, "src")) _, _, err := c.Run() @@ -65,9 +64,9 @@ func TestGenerateFromExistingPipelineAndDeploy(t *testing.T) { require.Contains(t, generatedYaml, 
"libraries:") require.Contains(t, generatedYaml, "- notebook:") - require.Contains(t, generatedYaml, fmt.Sprintf("path: %s", filepath.Join("..", "src", "notebook.py"))) + require.Contains(t, generatedYaml, "path: "+filepath.Join("..", "src", "notebook.py")) require.Contains(t, generatedYaml, "- file:") - require.Contains(t, generatedYaml, fmt.Sprintf("path: %s", filepath.Join("..", "src", "test.py"))) + require.Contains(t, generatedYaml, "path: "+filepath.Join("..", "src", "test.py")) deployBundle(t, ctx, bundleRoot) diff --git a/integration/bundle/init_test.go b/integration/bundle/init_test.go index f5c263ca3..87a3e30e5 100644 --- a/integration/bundle/init_test.go +++ b/integration/bundle/init_test.go @@ -66,7 +66,7 @@ func TestBundleInitOnMlopsStacks(t *testing.T) { // Assert that the README.md file was created contents := testutil.ReadFile(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md")) - assert.Contains(t, contents, fmt.Sprintf("# %s", projectName)) + assert.Contains(t, contents, "# "+projectName) // Validate the stack testutil.Chdir(t, filepath.Join(tmpDir2, "repo_name", projectName)) diff --git a/integration/bundle/main_test.go b/integration/bundle/main_test.go deleted file mode 100644 index 1c44d0aaf..000000000 --- a/integration/bundle/main_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package bundle_test - -import ( - "testing" - - "github.com/databricks/cli/integration/internal" -) - -// TestMain is the entrypoint executed by the test runner. -// See [internal.Main] for prerequisites for running integration tests. -func TestMain(m *testing.M) { - internal.Main(m) -} diff --git a/integration/cmd/alerts/main_test.go b/integration/cmd/alerts/main_test.go deleted file mode 100644 index 6987ade02..000000000 --- a/integration/cmd/alerts/main_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package alerts_test - -import ( - "testing" - - "github.com/databricks/cli/integration/internal" -) - -// TestMain is the entrypoint executed by the test runner. -// See [internal.Main] for prerequisites for running integration tests. -func TestMain(m *testing.M) { - internal.Main(m) -} diff --git a/integration/cmd/api/main_test.go b/integration/cmd/api/main_test.go deleted file mode 100644 index 70d021790..000000000 --- a/integration/cmd/api/main_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package api_test - -import ( - "testing" - - "github.com/databricks/cli/integration/internal" -) - -// TestMain is the entrypoint executed by the test runner. -// See [internal.Main] for prerequisites for running integration tests. 
-func TestMain(m *testing.M) { - internal.Main(m) -} diff --git a/integration/cmd/auth/describe_test.go b/integration/cmd/auth/describe_test.go index 41288dce6..f592bc276 100644 --- a/integration/cmd/auth/describe_test.go +++ b/integration/cmd/auth/describe_test.go @@ -2,7 +2,6 @@ package auth_test import ( "context" - "fmt" "testing" "github.com/databricks/cli/internal/testcli" @@ -21,14 +20,14 @@ func TestAuthDescribeSuccess(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, outStr) - require.Contains(t, outStr, fmt.Sprintf("Host: %s", w.Config.Host)) + require.Contains(t, outStr, "Host: "+w.Config.Host) me, err := w.CurrentUser.Me(context.Background()) require.NoError(t, err) - require.Contains(t, outStr, fmt.Sprintf("User: %s", me.UserName)) - require.Contains(t, outStr, fmt.Sprintf("Authenticated with: %s", w.Config.AuthType)) + require.Contains(t, outStr, "User: "+me.UserName) + require.Contains(t, outStr, "Authenticated with: "+w.Config.AuthType) require.Contains(t, outStr, "Current configuration:") - require.Contains(t, outStr, fmt.Sprintf("✓ host: %s", w.Config.Host)) + require.Contains(t, outStr, "✓ host: "+w.Config.Host) require.Contains(t, outStr, "✓ profile: default") } @@ -47,6 +46,6 @@ func TestAuthDescribeFailure(t *testing.T) { w, err := databricks.NewWorkspaceClient(&databricks.Config{}) require.NoError(t, err) - require.Contains(t, outStr, fmt.Sprintf("✓ host: %s", w.Config.Host)) + require.Contains(t, outStr, "✓ host: "+w.Config.Host) require.Contains(t, outStr, "✓ profile: nonexistent (from --profile flag)") } diff --git a/integration/cmd/auth/main_test.go b/integration/cmd/auth/main_test.go deleted file mode 100644 index 97b1d740b..000000000 --- a/integration/cmd/auth/main_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package auth_test - -import ( - "testing" - - "github.com/databricks/cli/integration/internal" -) - -// TestMain is the entrypoint executed by the test runner. -// See [internal.Main] for prerequisites for running integration tests. -func TestMain(m *testing.M) { - internal.Main(m) -} diff --git a/integration/cmd/clusters/main_test.go b/integration/cmd/clusters/main_test.go deleted file mode 100644 index ccd5660e7..000000000 --- a/integration/cmd/clusters/main_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package clusters_test - -import ( - "testing" - - "github.com/databricks/cli/integration/internal" -) - -// TestMain is the entrypoint executed by the test runner. -// See [internal.Main] for prerequisites for running integration tests. -func TestMain(m *testing.M) { - internal.Main(m) -} diff --git a/integration/cmd/fs/completion_test.go b/integration/cmd/fs/completion_test.go index 88ce2fcc1..b13bf9d60 100644 --- a/integration/cmd/fs/completion_test.go +++ b/integration/cmd/fs/completion_test.go @@ -2,7 +2,6 @@ package fs_test import ( "context" - "fmt" "strings" "testing" @@ -24,6 +23,6 @@ func TestFsCompletion(t *testing.T) { setupCompletionFile(t, f) stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "__complete", "fs", "ls", tmpDir+"/") - expectedOutput := fmt.Sprintf("%s/dir1/\n:2\n", tmpDir) + expectedOutput := tmpDir + "/dir1/\n:2\n" assert.Equal(t, expectedOutput, stdout.String()) } diff --git a/integration/cmd/fs/main_test.go b/integration/cmd/fs/main_test.go deleted file mode 100644 index b9402f0b2..000000000 --- a/integration/cmd/fs/main_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package fs_test - -import ( - "testing" - - "github.com/databricks/cli/integration/internal" -) - -// TestMain is the entrypoint executed by the test runner. 
-// See [internal.Main] for prerequisites for running integration tests. -func TestMain(m *testing.M) { - internal.Main(m) -} diff --git a/integration/cmd/jobs/jobs_test.go b/integration/cmd/jobs/jobs_test.go index b6bcfc5b3..7ebc135a3 100644 --- a/integration/cmd/jobs/jobs_test.go +++ b/integration/cmd/jobs/jobs_test.go @@ -3,7 +3,7 @@ package jobs_test import ( "context" "encoding/json" - "fmt" + "strconv" "testing" "github.com/databricks/cli/internal/testcli" @@ -20,5 +20,5 @@ func TestCreateJob(t *testing.T) { var output map[string]int err := json.Unmarshal(stdout.Bytes(), &output) require.NoError(t, err) - testcli.RequireSuccessfulRun(t, ctx, "jobs", "delete", fmt.Sprint(output["job_id"]), "--log-level=debug") + testcli.RequireSuccessfulRun(t, ctx, "jobs", "delete", strconv.Itoa(output["job_id"]), "--log-level=debug") } diff --git a/integration/cmd/jobs/main_test.go b/integration/cmd/jobs/main_test.go deleted file mode 100644 index 46369a526..000000000 --- a/integration/cmd/jobs/main_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package jobs_test - -import ( - "testing" - - "github.com/databricks/cli/integration/internal" -) - -// TestMain is the entrypoint executed by the test runner. -// See [internal.Main] for prerequisites for running integration tests. -func TestMain(m *testing.M) { - internal.Main(m) -} diff --git a/integration/cmd/main_test.go b/integration/cmd/main_test.go deleted file mode 100644 index a1a5586b6..000000000 --- a/integration/cmd/main_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package cmd_test - -import ( - "testing" - - "github.com/databricks/cli/integration/internal" -) - -// TestMain is the entrypoint executed by the test runner. -// See [internal.Main] for prerequisites for running integration tests. -func TestMain(m *testing.M) { - internal.Main(m) -} diff --git a/integration/cmd/repos/main_test.go b/integration/cmd/repos/main_test.go deleted file mode 100644 index 7eaa174bc..000000000 --- a/integration/cmd/repos/main_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package repos_test - -import ( - "testing" - - "github.com/databricks/cli/integration/internal" -) - -// TestMain is the entrypoint executed by the test runner. -// See [internal.Main] for prerequisites for running integration tests. -func TestMain(m *testing.M) { - internal.Main(m) -} diff --git a/integration/cmd/secrets/main_test.go b/integration/cmd/secrets/main_test.go deleted file mode 100644 index a44d30671..000000000 --- a/integration/cmd/secrets/main_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package secrets_test - -import ( - "testing" - - "github.com/databricks/cli/integration/internal" -) - -// TestMain is the entrypoint executed by the test runner. -// See [internal.Main] for prerequisites for running integration tests. -func TestMain(m *testing.M) { - internal.Main(m) -} diff --git a/integration/cmd/storage_credentials/main_test.go b/integration/cmd/storage_credentials/main_test.go deleted file mode 100644 index 14d00d966..000000000 --- a/integration/cmd/storage_credentials/main_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package storage_credentials_test - -import ( - "testing" - - "github.com/databricks/cli/integration/internal" -) - -// TestMain is the entrypoint executed by the test runner. -// See [internal.Main] for prerequisites for running integration tests. 
-func TestMain(m *testing.M) { - internal.Main(m) -} diff --git a/integration/cmd/sync/main_test.go b/integration/cmd/sync/main_test.go deleted file mode 100644 index 8d9f3ca25..000000000 --- a/integration/cmd/sync/main_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package sync_test - -import ( - "testing" - - "github.com/databricks/cli/integration/internal" -) - -// TestMain is the entrypoint executed by the test runner. -// See [internal.Main] for prerequisites for running integration tests. -func TestMain(m *testing.M) { - internal.Main(m) -} diff --git a/integration/cmd/sync/sync_test.go b/integration/cmd/sync/sync_test.go index 6f58b7e42..632497054 100644 --- a/integration/cmd/sync/sync_test.go +++ b/integration/cmd/sync/sync_test.go @@ -151,10 +151,7 @@ func (a *syncTest) remoteFileContent(ctx context.Context, relativePath, expected filePath := path.Join(a.remoteRoot, relativePath) // Remove leading "/" so we can use it in the URL. - urlPath := fmt.Sprintf( - "/api/2.0/workspace-files/%s", - strings.TrimLeft(filePath, "/"), - ) + urlPath := "/api/2.0/workspace-files/" + strings.TrimLeft(filePath, "/") apiClient, err := client.New(a.w.Config) require.NoError(a.t, err) diff --git a/integration/cmd/version/main_test.go b/integration/cmd/version/main_test.go deleted file mode 100644 index 4aa5e046a..000000000 --- a/integration/cmd/version/main_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package version_test - -import ( - "testing" - - "github.com/databricks/cli/integration/internal" -) - -// TestMain is the entrypoint executed by the test runner. -// See [internal.Main] for prerequisites for running integration tests. -func TestMain(m *testing.M) { - internal.Main(m) -} diff --git a/integration/cmd/workspace/main_test.go b/integration/cmd/workspace/main_test.go deleted file mode 100644 index 40d140eac..000000000 --- a/integration/cmd/workspace/main_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package workspace_test - -import ( - "testing" - - "github.com/databricks/cli/integration/internal" -) - -// TestMain is the entrypoint executed by the test runner. -// See [internal.Main] for prerequisites for running integration tests. 
-func TestMain(m *testing.M) { - internal.Main(m) -} diff --git a/integration/cmd/workspace/workspace_test.go b/integration/cmd/workspace/workspace_test.go index 4edbbfc83..c376a87d2 100644 --- a/integration/cmd/workspace/workspace_test.go +++ b/integration/cmd/workspace/workspace_test.go @@ -114,7 +114,7 @@ func TestExportDir(t *testing.T) { require.NoError(t, err) expectedLogs := strings.Join([]string{ - fmt.Sprintf("Exporting files from %s", sourceDir), + "Exporting files from " + sourceDir, fmt.Sprintf("%s -> %s", path.Join(sourceDir, "a/b/c/file-b"), filepath.Join(targetDir, "a/b/c/file-b")), fmt.Sprintf("%s -> %s", path.Join(sourceDir, "file-a"), filepath.Join(targetDir, "file-a")), fmt.Sprintf("%s -> %s", path.Join(sourceDir, "pyNotebook"), filepath.Join(targetDir, "pyNotebook.py")), @@ -185,7 +185,7 @@ func TestImportDir(t *testing.T) { stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "workspace", "import-dir", "./testdata/import_dir", targetDir, "--log-level=debug") expectedLogs := strings.Join([]string{ - fmt.Sprintf("Importing files from %s", "./testdata/import_dir"), + "Importing files from " + "./testdata/import_dir", fmt.Sprintf("%s -> %s", filepath.FromSlash("a/b/c/file-b"), path.Join(targetDir, "a/b/c/file-b")), fmt.Sprintf("%s -> %s", filepath.FromSlash("file-a"), path.Join(targetDir, "file-a")), fmt.Sprintf("%s -> %s", filepath.FromSlash("jupyterNotebook.ipynb"), path.Join(targetDir, "jupyterNotebook")), diff --git a/integration/enforce_convention_test.go b/integration/enforce_convention_test.go deleted file mode 100644 index cc822a6a3..000000000 --- a/integration/enforce_convention_test.go +++ /dev/null @@ -1,116 +0,0 @@ -package integration - -import ( - "go/parser" - "go/token" - "os" - "path/filepath" - "strings" - "testing" - "text/template" - - "golang.org/x/exp/maps" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -type packageInfo struct { - Name string - Files []string -} - -func enumeratePackages(t *testing.T) map[string]packageInfo { - pkgmap := make(map[string]packageInfo) - err := filepath.Walk(".", func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - // Skip files. - if !info.IsDir() { - return nil - } - - // Skip the root directory and the "internal" directory. - if path == "." || strings.HasPrefix(path, "internal") { - return nil - } - - fset := token.NewFileSet() - pkgs, err := parser.ParseDir(fset, path, nil, parser.ParseComments) - require.NoError(t, err) - if len(pkgs) == 0 { - return nil - } - - // Expect one package per directory. - require.Len(t, pkgs, 1, "Directory %s contains more than one package", path) - v := maps.Values(pkgs)[0] - - // Record the package. - pkgmap[path] = packageInfo{ - Name: v.Name, - Files: maps.Keys(v.Files), - } - return nil - }) - require.NoError(t, err) - return pkgmap -} - -// TestEnforcePackageNames checks that all integration test package names use the "_test" suffix. -// We enforce this package name to avoid package name aliasing. -func TestEnforcePackageNames(t *testing.T) { - pkgmap := enumeratePackages(t) - for _, pkg := range pkgmap { - assert.True(t, strings.HasSuffix(pkg.Name, "_test"), "Package name %s does not end with _test", pkg.Name) - } -} - -var mainTestTemplate = template.Must(template.New("main_test").Parse( - `package {{.Name}} - -import ( - "testing" - - "github.com/databricks/cli/integration/internal" -) - -// TestMain is the entrypoint executed by the test runner. 
-// See [internal.Main] for prerequisites for running integration tests. -func TestMain(m *testing.M) { - internal.Main(m) -} -`)) - -func TestEnforceMainTest(t *testing.T) { - pkgmap := enumeratePackages(t) - for dir, pkg := range pkgmap { - found := false - for _, file := range pkg.Files { - if filepath.Base(file) == "main_test.go" { - found = true - break - } - } - - // Expect a "main_test.go" file in each package. - assert.True(t, found, "Directory %s does not contain a main_test.go file", dir) - } -} - -func TestWriteMainTest(t *testing.T) { - t.Skip("Uncomment to write main_test.go files") - - pkgmap := enumeratePackages(t) - for dir, pkg := range pkgmap { - // Write a "main_test.go" file to the package. - // This file is required to run the integration tests. - f, err := os.Create(filepath.Join(dir, "main_test.go")) - require.NoError(t, err) - defer f.Close() - err = mainTestTemplate.Execute(f, pkg) - require.NoError(t, err) - } -} diff --git a/integration/internal/acc/fixtures.go b/integration/internal/acc/fixtures.go index cd867fb3a..2367d228f 100644 --- a/integration/internal/acc/fixtures.go +++ b/integration/internal/acc/fixtures.go @@ -45,7 +45,7 @@ func TemporaryDbfsDir(t *WorkspaceT, name ...string) string { // Prefix the name with "integration-test-" to make it easier to identify. name = append([]string{"integration-test-"}, name...) - path := fmt.Sprintf("/tmp/%s", testutil.RandomName(name...)) + path := "/tmp/" + testutil.RandomName(name...) t.Logf("Creating DBFS directory %s", path) err := t.W.Dbfs.MkdirsByPath(ctx, path) diff --git a/integration/internal/main.go b/integration/internal/main.go deleted file mode 100644 index 6aa2a4c93..000000000 --- a/integration/internal/main.go +++ /dev/null @@ -1,22 +0,0 @@ -package internal - -import ( - "fmt" - "os" - "testing" - - "github.com/databricks/cli/integration/internal/acc" -) - -// Main is the entry point for integration tests. -// We use this for all integration tests defined in this subtree to ensure -// they are not inadvertently executed when calling `go test ./...`. -func Main(m *testing.M) { - value := os.Getenv("CLOUD_ENV") - if value == "" && !acc.IsInDebug() { - fmt.Println("CLOUD_ENV is not set, skipping integration tests") - return - } - - m.Run() -} diff --git a/integration/libs/filer/main_test.go b/integration/libs/filer/main_test.go deleted file mode 100644 index ca866d952..000000000 --- a/integration/libs/filer/main_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package filer_test - -import ( - "testing" - - "github.com/databricks/cli/integration/internal" -) - -// TestMain is the entrypoint executed by the test runner. -// See [internal.Main] for prerequisites for running integration tests. -func TestMain(m *testing.M) { - internal.Main(m) -} diff --git a/integration/libs/git/main_test.go b/integration/libs/git/main_test.go deleted file mode 100644 index 5d68e0851..000000000 --- a/integration/libs/git/main_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package git_test - -import ( - "testing" - - "github.com/databricks/cli/integration/internal" -) - -// TestMain is the entrypoint executed by the test runner. -// See [internal.Main] for prerequisites for running integration tests. 
-func TestMain(m *testing.M) { - internal.Main(m) -} diff --git a/integration/libs/locker/main_test.go b/integration/libs/locker/main_test.go deleted file mode 100644 index 33a883768..000000000 --- a/integration/libs/locker/main_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package locker_test - -import ( - "testing" - - "github.com/databricks/cli/integration/internal" -) - -// TestMain is the entrypoint executed by the test runner. -// See [internal.Main] for prerequisites for running integration tests. -func TestMain(m *testing.M) { - internal.Main(m) -} diff --git a/integration/libs/tags/main_test.go b/integration/libs/tags/main_test.go deleted file mode 100644 index 4eaf54a20..000000000 --- a/integration/libs/tags/main_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package tags_test - -import ( - "testing" - - "github.com/databricks/cli/integration/internal" -) - -// TestMain is the entrypoint executed by the test runner. -// See [internal.Main] for prerequisites for running integration tests. -func TestMain(m *testing.M) { - internal.Main(m) -} diff --git a/integration/python/main_test.go b/integration/python/main_test.go deleted file mode 100644 index b35da21e1..000000000 --- a/integration/python/main_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package python_test - -import ( - "testing" - - "github.com/databricks/cli/integration/internal" -) - -// TestMain is the entrypoint executed by the test runner. -// See [internal.Main] for prerequisites for running integration tests. -func TestMain(m *testing.M) { - internal.Main(m) -} diff --git a/integration/python/python_tasks_test.go b/integration/python/python_tasks_test.go index 9411afb13..39b38f890 100644 --- a/integration/python/python_tasks_test.go +++ b/integration/python/python_tasks_test.go @@ -5,7 +5,6 @@ import ( "context" "encoding/base64" "encoding/json" - "fmt" "os" "path" "slices" @@ -244,8 +243,8 @@ func prepareDBFSFiles(t *testing.T) *testFiles { return &testFiles{ w: w, pyNotebookPath: path.Join(baseDir, "test.py"), - sparkPythonPath: fmt.Sprintf("dbfs:%s", path.Join(baseDir, "spark.py")), - wheelPath: fmt.Sprintf("dbfs:%s", path.Join(baseDir, "my_test_code-0.0.1-py3-none-any.whl")), + sparkPythonPath: "dbfs:" + path.Join(baseDir, "spark.py"), + wheelPath: "dbfs:" + path.Join(baseDir, "my_test_code-0.0.1-py3-none-any.whl"), } } @@ -268,7 +267,7 @@ func GenerateNotebookTasks(notebookPath string, versions []string, nodeTypeId st tasks := make([]jobs.SubmitTask, 0) for i := range versions { task := jobs.SubmitTask{ - TaskKey: fmt.Sprintf("notebook_%s", strings.ReplaceAll(versions[i], ".", "_")), + TaskKey: "notebook_" + strings.ReplaceAll(versions[i], ".", "_"), NotebookTask: &jobs.NotebookTask{ NotebookPath: notebookPath, }, @@ -289,7 +288,7 @@ func GenerateSparkPythonTasks(notebookPath string, versions []string, nodeTypeId tasks := make([]jobs.SubmitTask, 0) for i := range versions { task := jobs.SubmitTask{ - TaskKey: fmt.Sprintf("spark_%s", strings.ReplaceAll(versions[i], ".", "_")), + TaskKey: "spark_" + strings.ReplaceAll(versions[i], ".", "_"), SparkPythonTask: &jobs.SparkPythonTask{ PythonFile: notebookPath, }, @@ -310,7 +309,7 @@ func GenerateWheelTasks(wheelPath string, versions []string, nodeTypeId string) tasks := make([]jobs.SubmitTask, 0) for i := range versions { task := jobs.SubmitTask{ - TaskKey: fmt.Sprintf("whl_%s", strings.ReplaceAll(versions[i], ".", "_")), + TaskKey: "whl_" + strings.ReplaceAll(versions[i], ".", "_"), PythonWheelTask: &jobs.PythonWheelTask{ PackageName: "my_test_code", EntryPoint: "run", diff --git 
a/internal/testutil/file.go b/internal/testutil/file.go index 538a3c20a..476c4123a 100644 --- a/internal/testutil/file.go +++ b/internal/testutil/file.go @@ -4,6 +4,7 @@ import ( "os" "path/filepath" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -52,3 +53,31 @@ func ReadFile(t TestingT, path string) string { return string(b) } + +// StatFile returns the file info for a file. +func StatFile(t TestingT, path string) os.FileInfo { + fi, err := os.Stat(path) + require.NoError(t, err) + + return fi +} + +// AssertFileContents asserts that the file at path has the expected content. +func AssertFileContents(t TestingT, path, expected string) bool { + actual := ReadFile(t, path) + return assert.Equal(t, expected, actual) +} + +// AssertFilePermissions asserts that the file at path has the expected permissions. +func AssertFilePermissions(t TestingT, path string, expected os.FileMode) bool { + fi := StatFile(t, path) + assert.False(t, fi.Mode().IsDir(), "expected a file, got a directory") + return assert.Equal(t, expected, fi.Mode().Perm(), "expected 0%o, got 0%o", expected, fi.Mode().Perm()) +} + +// AssertDirPermissions asserts that the file at path has the expected permissions. +func AssertDirPermissions(t TestingT, path string, expected os.FileMode) bool { + fi := StatFile(t, path) + assert.True(t, fi.Mode().IsDir(), "expected a directory, got a file") + return assert.Equal(t, expected, fi.Mode().Perm(), "expected 0%o, got 0%o", expected, fi.Mode().Perm()) +} diff --git a/libs/auth/oauth.go b/libs/auth/oauth.go index 026c45468..1037a5a85 100644 --- a/libs/auth/oauth.go +++ b/libs/auth/oauth.go @@ -107,7 +107,7 @@ func (a *PersistentAuth) Load(ctx context.Context) (*oauth2.Token, error) { func (a *PersistentAuth) ProfileName() string { if a.AccountID != "" { - return fmt.Sprintf("ACCOUNT-%s", a.AccountID) + return "ACCOUNT-" + a.AccountID } host := strings.TrimPrefix(a.Host, "https://") split := strings.Split(host, ".") @@ -210,12 +210,12 @@ func (a *PersistentAuth) oidcEndpoints(ctx context.Context) (*oauthAuthorization prefix := a.key() if a.AccountID != "" { return &oauthAuthorizationServer{ - AuthorizationEndpoint: fmt.Sprintf("%s/v1/authorize", prefix), - TokenEndpoint: fmt.Sprintf("%s/v1/token", prefix), + AuthorizationEndpoint: prefix + "/v1/authorize", + TokenEndpoint: prefix + "/v1/token", }, nil } var oauthEndpoints oauthAuthorizationServer - oidc := fmt.Sprintf("%s/oidc/.well-known/oauth-authorization-server", prefix) + oidc := prefix + "/oidc/.well-known/oauth-authorization-server" err := a.http.Do(ctx, "GET", oidc, httpclient.WithResponseUnmarshal(&oauthEndpoints)) if err != nil { return nil, fmt.Errorf("fetch .well-known: %w", err) @@ -247,7 +247,7 @@ func (a *PersistentAuth) oauth2Config(ctx context.Context) (*oauth2.Config, erro TokenURL: endpoints.TokenEndpoint, AuthStyle: oauth2.AuthStyleInParams, }, - RedirectURL: fmt.Sprintf("http://%s", appRedirectAddr), + RedirectURL: "http://" + appRedirectAddr, Scopes: scopes, }, nil } @@ -258,7 +258,7 @@ func (a *PersistentAuth) oauth2Config(ctx context.Context) (*oauth2.Config, erro func (a *PersistentAuth) key() string { a.Host = strings.TrimSuffix(a.Host, "/") if !strings.HasPrefix(a.Host, "http") { - a.Host = fmt.Sprintf("https://%s", a.Host) + a.Host = "https://" + a.Host } if a.AccountID != "" { return fmt.Sprintf("%s/oidc/accounts/%s", a.Host, a.AccountID) diff --git a/libs/auth/oauth_test.go b/libs/auth/oauth_test.go index 837ff4fee..6c3b9bf47 100644 --- a/libs/auth/oauth_test.go +++ 
b/libs/auth/oauth_test.go @@ -112,7 +112,7 @@ func TestLoadRefresh(t *testing.T) { }, }.ApplyClient(t, func(ctx context.Context, c *client.DatabricksClient) { ctx = useInsecureOAuthHttpClientForTests(ctx) - expectedKey := fmt.Sprintf("%s/oidc/accounts/xyz", c.Config.Host) + expectedKey := c.Config.Host + "/oidc/accounts/xyz" p := &PersistentAuth{ Host: c.Config.Host, AccountID: "xyz", @@ -149,7 +149,7 @@ func TestChallenge(t *testing.T) { }, }.ApplyClient(t, func(ctx context.Context, c *client.DatabricksClient) { ctx = useInsecureOAuthHttpClientForTests(ctx) - expectedKey := fmt.Sprintf("%s/oidc/accounts/xyz", c.Config.Host) + expectedKey := c.Config.Host + "/oidc/accounts/xyz" browserOpened := make(chan string) p := &PersistentAuth{ diff --git a/libs/cmdio/error_event.go b/libs/cmdio/error_event.go index 933f9d0d0..62897995b 100644 --- a/libs/cmdio/error_event.go +++ b/libs/cmdio/error_event.go @@ -1,13 +1,11 @@ package cmdio -import "fmt" - type ErrorEvent struct { Error string `json:"error"` } func (event *ErrorEvent) String() string { - return fmt.Sprintf("Error: %s", event.Error) + return "Error: " + event.Error } func (event *ErrorEvent) IsInplaceSupported() bool { diff --git a/libs/cmdio/logger.go b/libs/cmdio/logger.go index 7bc95e9a5..7edad5bf0 100644 --- a/libs/cmdio/logger.go +++ b/libs/cmdio/logger.go @@ -4,6 +4,7 @@ import ( "bufio" "context" "encoding/json" + "errors" "fmt" "io" "os" @@ -124,7 +125,7 @@ func splitAtLastNewLine(s string) (string, string) { func (l *Logger) AskSelect(question string, choices []string) (string, error) { if l.Mode == flags.ModeJson { - return "", fmt.Errorf("question prompts are not supported in json mode") + return "", errors.New("question prompts are not supported in json mode") } // Promptui does not support multiline prompts. So we split the question. @@ -140,7 +141,7 @@ func (l *Logger) AskSelect(question string, choices []string) (string, error) { HideHelp: true, Templates: &promptui.SelectTemplates{ Label: "{{.}}: ", - Selected: fmt.Sprintf("%s: {{.}}", last), + Selected: last + ": {{.}}", }, } @@ -153,7 +154,7 @@ func (l *Logger) AskSelect(question string, choices []string) (string, error) { func (l *Logger) Ask(question, defaultVal string) (string, error) { if l.Mode == flags.ModeJson { - return "", fmt.Errorf("question prompts are not supported in json mode") + return "", errors.New("question prompts are not supported in json mode") } // Add default value to question prompt. 
diff --git a/libs/databrickscfg/cfgpickers/clusters.go b/libs/databrickscfg/cfgpickers/clusters.go index 6ae7d99c6..e27d13690 100644 --- a/libs/databrickscfg/cfgpickers/clusters.go +++ b/libs/databrickscfg/cfgpickers/clusters.go @@ -33,7 +33,7 @@ func GetRuntimeVersion(cluster compute.ClusterDetails) (string, bool) { match = dbrSnapshotVersionRegex.FindStringSubmatch(cluster.SparkVersion) if len(match) > 1 { // we return 14.999 for 14.x-snapshot for semver.Compare() to work properly - return fmt.Sprintf("%s.999", match[1]), true + return match[1] + ".999", true } return "", false } diff --git a/libs/databrickscfg/loader.go b/libs/databrickscfg/loader.go index 12a516c59..84c8398bf 100644 --- a/libs/databrickscfg/loader.go +++ b/libs/databrickscfg/loader.go @@ -19,7 +19,7 @@ var errNoMatchingProfiles = errors.New("no matching config profiles found") type errMultipleProfiles []string func (e errMultipleProfiles) Error() string { - return fmt.Sprintf("multiple profiles matched: %s", strings.Join(e, ", ")) + return "multiple profiles matched: " + strings.Join(e, ", ") } func findMatchingProfile(configFile *config.File, matcher func(*ini.Section) bool) (*ini.Section, error) { diff --git a/libs/dyn/convert/normalize.go b/libs/dyn/convert/normalize.go index 31cd8b6e3..ee26d5afc 100644 --- a/libs/dyn/convert/normalize.go +++ b/libs/dyn/convert/normalize.go @@ -97,7 +97,7 @@ func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value, seen if !pv.IsAnchor() { diags = diags.Append(diag.Diagnostic{ Severity: diag.Warning, - Summary: fmt.Sprintf("unknown field: %s", pk.MustString()), + Summary: "unknown field: " + pk.MustString(), // Show all locations the unknown field is defined at. Locations: pk.Locations(), Paths: []dyn.Path{path}, diff --git a/libs/dyn/jsonloader/json.go b/libs/dyn/jsonloader/json.go index 3f2dc859f..708fc401f 100644 --- a/libs/dyn/jsonloader/json.go +++ b/libs/dyn/jsonloader/json.go @@ -3,6 +3,7 @@ package jsonloader import ( "bytes" "encoding/json" + "errors" "fmt" "io" @@ -20,7 +21,7 @@ func LoadJSON(data []byte, source string) (dyn.Value, error) { value, err := decodeValue(decoder, &offset) if err != nil { if err == io.EOF { - err = fmt.Errorf("unexpected end of JSON input") + err = errors.New("unexpected end of JSON input") } return dyn.InvalidValue, fmt.Errorf("error decoding JSON at %s: %v", value.Location(), err) } @@ -57,7 +58,7 @@ func decodeValue(decoder *json.Decoder, o *Offset) (dyn.Value, error) { } key, ok := keyToken.(string) if !ok { - return invalidValueWithLocation(decoder, o), fmt.Errorf("expected string for object key") + return invalidValueWithLocation(decoder, o), errors.New("expected string for object key") } // Get the offset of the key by subtracting the length of the key and the '"' character diff --git a/libs/dyn/location.go b/libs/dyn/location.go index 961d2f121..d2b2ad596 100644 --- a/libs/dyn/location.go +++ b/libs/dyn/location.go @@ -1,6 +1,7 @@ package dyn import ( + "errors" "fmt" "path/filepath" ) @@ -17,7 +18,7 @@ func (l Location) String() string { func (l Location) Directory() (string, error) { if l.File == "" { - return "", fmt.Errorf("no file in location") + return "", errors.New("no file in location") } return filepath.Dir(l.File), nil diff --git a/libs/dyn/mapping_test.go b/libs/dyn/mapping_test.go index 67144ae55..d0347d22a 100644 --- a/libs/dyn/mapping_test.go +++ b/libs/dyn/mapping_test.go @@ -1,7 +1,7 @@ package dyn_test import ( - "fmt" + "strconv" "testing" "github.com/databricks/cli/libs/dyn" @@ -186,13 +186,13 @@ func 
TestMappingClone(t *testing.T) { func TestMappingMerge(t *testing.T) { var m1 dyn.Mapping for i := range 10 { - err := m1.Set(dyn.V(fmt.Sprintf("%d", i)), dyn.V(i)) + err := m1.Set(dyn.V(strconv.Itoa(i)), dyn.V(i)) require.NoError(t, err) } var m2 dyn.Mapping for i := 5; i < 15; i++ { - err := m2.Set(dyn.V(fmt.Sprintf("%d", i)), dyn.V(i)) + err := m2.Set(dyn.V(strconv.Itoa(i)), dyn.V(i)) require.NoError(t, err) } diff --git a/libs/dyn/merge/override_test.go b/libs/dyn/merge/override_test.go index ea161d27c..d9d3f3983 100644 --- a/libs/dyn/merge/override_test.go +++ b/libs/dyn/merge/override_test.go @@ -1,7 +1,7 @@ package merge import ( - "fmt" + "errors" "testing" "time" @@ -373,7 +373,7 @@ func TestOverride_Primitive(t *testing.T) { if modified { t.Run(tc.name+" - visitor has error", func(t *testing.T) { - _, visitor := createVisitor(visitorOpts{error: fmt.Errorf("unexpected change in test")}) + _, visitor := createVisitor(visitorOpts{error: errors.New("unexpected change in test")}) _, err := override(dyn.EmptyPath, tc.left, tc.right, visitor) assert.EqualError(t, err, "unexpected change in test") diff --git a/libs/dyn/path_string_test.go b/libs/dyn/path_string_test.go index 0d64bf110..eb1816d7d 100644 --- a/libs/dyn/path_string_test.go +++ b/libs/dyn/path_string_test.go @@ -1,7 +1,7 @@ package dyn_test import ( - "fmt" + "errors" "testing" . "github.com/databricks/cli/libs/dyn" @@ -52,31 +52,31 @@ func TestNewPathFromString(t *testing.T) { }, { input: "foo[123", - err: fmt.Errorf("invalid path: foo[123"), + err: errors.New("invalid path: foo[123"), }, { input: "foo[123]]", - err: fmt.Errorf("invalid path: foo[123]]"), + err: errors.New("invalid path: foo[123]]"), }, { input: "foo[[123]", - err: fmt.Errorf("invalid path: foo[[123]"), + err: errors.New("invalid path: foo[[123]"), }, { input: "foo[[123]]", - err: fmt.Errorf("invalid path: foo[[123]]"), + err: errors.New("invalid path: foo[[123]]"), }, { input: "foo[foo]", - err: fmt.Errorf("invalid path: foo[foo]"), + err: errors.New("invalid path: foo[foo]"), }, { input: "foo..bar", - err: fmt.Errorf("invalid path: foo..bar"), + err: errors.New("invalid path: foo..bar"), }, { input: "foo.bar.", - err: fmt.Errorf("invalid path: foo.bar."), + err: errors.New("invalid path: foo.bar."), }, { // Every component may have a leading dot. @@ -86,7 +86,7 @@ func TestNewPathFromString(t *testing.T) { { // But after an index there must be a dot. input: "foo[1]bar", - err: fmt.Errorf("invalid path: foo[1]bar"), + err: errors.New("invalid path: foo[1]bar"), }, } { p, err := NewPathFromString(tc.input) diff --git a/libs/dyn/visit_map_test.go b/libs/dyn/visit_map_test.go index 3c2908c4b..ad091743d 100644 --- a/libs/dyn/visit_map_test.go +++ b/libs/dyn/visit_map_test.go @@ -1,6 +1,7 @@ package dyn_test import ( + "errors" "fmt" "testing" @@ -71,7 +72,7 @@ func TestMapFuncOnMap(t *testing.T) { }, vbar.AsAny()) // Return error from map function. - ref := fmt.Errorf("error") + ref := errors.New("error") verr, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("foo")), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref }) @@ -137,7 +138,7 @@ func TestMapFuncOnSequence(t *testing.T) { assert.Equal(t, []any{42, 45}, v1.AsAny()) // Return error from map function. 
- ref := fmt.Errorf("error") + ref := errors.New("error") verr, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(0)), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref }) @@ -211,7 +212,7 @@ func TestMapForeachOnMapError(t *testing.T) { }) // Check that an error from the map function propagates. - ref := fmt.Errorf("error") + ref := errors.New("error") _, err := dyn.Map(vin, ".", dyn.Foreach(func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref })) @@ -255,7 +256,7 @@ func TestMapForeachOnSequenceError(t *testing.T) { }) // Check that an error from the map function propagates. - ref := fmt.Errorf("error") + ref := errors.New("error") _, err := dyn.Map(vin, ".", dyn.Foreach(func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref })) diff --git a/libs/dyn/yamlsaver/saver.go b/libs/dyn/yamlsaver/saver.go index 7398e2594..a7838ff36 100644 --- a/libs/dyn/yamlsaver/saver.go +++ b/libs/dyn/yamlsaver/saver.go @@ -123,9 +123,9 @@ func (s *saver) toYamlNodeWithStyle(v dyn.Value, style yaml.Style) (*yaml.Node, } return &yaml.Node{Kind: yaml.ScalarNode, Value: v.MustString(), Style: style}, nil case dyn.KindBool: - return &yaml.Node{Kind: yaml.ScalarNode, Value: fmt.Sprint(v.MustBool()), Style: style}, nil + return &yaml.Node{Kind: yaml.ScalarNode, Value: strconv.FormatBool(v.MustBool()), Style: style}, nil case dyn.KindInt: - return &yaml.Node{Kind: yaml.ScalarNode, Value: fmt.Sprint(v.MustInt()), Style: style}, nil + return &yaml.Node{Kind: yaml.ScalarNode, Value: strconv.FormatInt(v.MustInt(), 10), Style: style}, nil case dyn.KindFloat: return &yaml.Node{Kind: yaml.ScalarNode, Value: fmt.Sprint(v.MustFloat()), Style: style}, nil case dyn.KindTime: diff --git a/libs/env/context.go b/libs/env/context.go index af4d1afa0..37b76147a 100644 --- a/libs/env/context.go +++ b/libs/env/context.go @@ -65,7 +65,7 @@ func Set(ctx context.Context, key, value string) context.Context { return setMap(ctx, m) } -func homeEnvVar() string { +func HomeEnvVar() string { if runtime.GOOS == "windows" { return "USERPROFILE" } @@ -73,14 +73,14 @@ func homeEnvVar() string { } func WithUserHomeDir(ctx context.Context, value string) context.Context { - return Set(ctx, homeEnvVar(), value) + return Set(ctx, HomeEnvVar(), value) } // ErrNoHomeEnv indicates the absence of $HOME env variable var ErrNoHomeEnv = errors.New("$HOME is not set") func UserHomeDir(ctx context.Context) (string, error) { - home := Get(ctx, homeEnvVar()) + home := Get(ctx, HomeEnvVar()) if home == "" { return "", ErrNoHomeEnv } diff --git a/libs/errs/aggregate_test.go b/libs/errs/aggregate_test.go index 1af57e099..216276a06 100644 --- a/libs/errs/aggregate_test.go +++ b/libs/errs/aggregate_test.go @@ -1,16 +1,16 @@ package errs import ( - "fmt" + "errors" "testing" "github.com/stretchr/testify/assert" ) func TestFromManyErrors(t *testing.T) { - e1 := fmt.Errorf("Error 1") - e2 := fmt.Errorf("Error 2") - e3 := fmt.Errorf("Error 3") + e1 := errors.New("Error 1") + e2 := errors.New("Error 2") + e3 := errors.New("Error 3") err := FromMany(e1, e2, e3) assert.ErrorIs(t, err, e1) @@ -23,9 +23,9 @@ Error 3`, err.Error()) } func TestFromManyErrorsWihtNil(t *testing.T) { - e1 := fmt.Errorf("Error 1") + e1 := errors.New("Error 1") var e2 error = nil - e3 := fmt.Errorf("Error 3") + e3 := errors.New("Error 3") err := FromMany(e1, e2, e3) assert.ErrorIs(t, err, e1) diff --git a/libs/exec/shell_cmd.go b/libs/exec/shell_cmd.go index 164d09739..057ed06a4 100644 --- a/libs/exec/shell_cmd.go +++ 
b/libs/exec/shell_cmd.go @@ -2,7 +2,6 @@ package exec import ( "errors" - "fmt" osexec "os/exec" ) @@ -18,7 +17,7 @@ func (s cmdShell) prepare(command string) (*execContext, error) { return &execContext{ executable: s.executable, - args: []string{"/D", "/E:ON", "/V:OFF", "/S", "/C", fmt.Sprintf(`CALL %s`, filename)}, + args: []string{"/D", "/E:ON", "/V:OFF", "/S", "/C", "CALL " + filename}, scriptFile: filename, }, nil } diff --git a/libs/fakefs/fakefs.go b/libs/fakefs/fakefs.go index a8d5eb873..050ee2d6e 100644 --- a/libs/fakefs/fakefs.go +++ b/libs/fakefs/fakefs.go @@ -1,12 +1,12 @@ package fakefs import ( - "fmt" + "errors" "io/fs" "time" ) -var ErrNotImplemented = fmt.Errorf("not implemented") +var ErrNotImplemented = errors.New("not implemented") // DirEntry is a fake implementation of [fs.DirEntry]. type DirEntry struct { diff --git a/libs/filer/fake_filer.go b/libs/filer/fake_filer.go index 76b8bcd94..1e1cbd985 100644 --- a/libs/filer/fake_filer.go +++ b/libs/filer/fake_filer.go @@ -2,7 +2,7 @@ package filer import ( "context" - "fmt" + "errors" "io" "io/fs" "path" @@ -17,7 +17,7 @@ type FakeFiler struct { } func (f *FakeFiler) Write(ctx context.Context, p string, reader io.Reader, mode ...WriteMode) error { - return fmt.Errorf("not implemented") + return errors.New("not implemented") } func (f *FakeFiler) Read(ctx context.Context, p string) (io.ReadCloser, error) { @@ -30,7 +30,7 @@ func (f *FakeFiler) Read(ctx context.Context, p string) (io.ReadCloser, error) { } func (f *FakeFiler) Delete(ctx context.Context, p string, mode ...DeleteMode) error { - return fmt.Errorf("not implemented") + return errors.New("not implemented") } func (f *FakeFiler) ReadDir(ctx context.Context, p string) ([]fs.DirEntry, error) { @@ -59,7 +59,7 @@ func (f *FakeFiler) ReadDir(ctx context.Context, p string) ([]fs.DirEntry, error } func (f *FakeFiler) Mkdir(ctx context.Context, path string) error { - return fmt.Errorf("not implemented") + return errors.New("not implemented") } func (f *FakeFiler) Stat(ctx context.Context, path string) (fs.FileInfo, error) { diff --git a/libs/filer/filer.go b/libs/filer/filer.go index 83dc560cb..372c82929 100644 --- a/libs/filer/filer.go +++ b/libs/filer/filer.go @@ -2,7 +2,6 @@ package filer import ( "context" - "fmt" "io" "io/fs" ) @@ -36,7 +35,7 @@ type FileAlreadyExistsError struct { } func (err FileAlreadyExistsError) Error() string { - return fmt.Sprintf("file already exists: %s", err.path) + return "file already exists: " + err.path } func (err FileAlreadyExistsError) Is(other error) bool { @@ -52,7 +51,7 @@ func (err FileDoesNotExistError) Is(other error) bool { } func (err FileDoesNotExistError) Error() string { - return fmt.Sprintf("file does not exist: %s", err.path) + return "file does not exist: " + err.path } type NoSuchDirectoryError struct { @@ -60,7 +59,7 @@ type NoSuchDirectoryError struct { } func (err NoSuchDirectoryError) Error() string { - return fmt.Sprintf("no such directory: %s", err.path) + return "no such directory: " + err.path } func (err NoSuchDirectoryError) Is(other error) bool { @@ -72,7 +71,7 @@ type NotADirectory struct { } func (err NotADirectory) Error() string { - return fmt.Sprintf("not a directory: %s", err.path) + return "not a directory: " + err.path } func (err NotADirectory) Is(other error) bool { @@ -84,7 +83,7 @@ type NotAFile struct { } func (err NotAFile) Error() string { - return fmt.Sprintf("not a file: %s", err.path) + return "not a file: " + err.path } func (err NotAFile) Is(other error) bool { @@ -96,7 +95,7 @@ type 
DirectoryNotEmptyError struct { } func (err DirectoryNotEmptyError) Error() string { - return fmt.Sprintf("directory not empty: %s", err.path) + return "directory not empty: " + err.path } func (err DirectoryNotEmptyError) Is(other error) bool { @@ -118,7 +117,7 @@ type PermissionError struct { } func (err PermissionError) Error() string { - return fmt.Sprintf("access denied: %s", err.path) + return "access denied: " + err.path } func (err PermissionError) Is(other error) bool { diff --git a/libs/filer/files_client.go b/libs/filer/files_client.go index 7ea1d0f03..98a534684 100644 --- a/libs/filer/files_client.go +++ b/libs/filer/files_client.go @@ -116,10 +116,7 @@ func (w *FilesClient) urlPath(name string) (string, string, error) { } // The user specified part of the path must be escaped. - urlPath := fmt.Sprintf( - "/api/2.0/fs/files/%s", - url.PathEscape(strings.TrimLeft(absPath, "/")), - ) + urlPath := "/api/2.0/fs/files/" + url.PathEscape(strings.TrimLeft(absPath, "/")) return absPath, urlPath, nil } diff --git a/libs/filer/workspace_files_cache_test.go b/libs/filer/workspace_files_cache_test.go index 8983c5982..a73f415c1 100644 --- a/libs/filer/workspace_files_cache_test.go +++ b/libs/filer/workspace_files_cache_test.go @@ -2,7 +2,7 @@ package filer import ( "context" - "fmt" + "errors" "io" "io/fs" "testing" @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" ) -var errNotImplemented = fmt.Errorf("not implemented") +var errNotImplemented = errors.New("not implemented") type cacheTestFiler struct { calls int diff --git a/libs/flags/json_flag_test.go b/libs/flags/json_flag_test.go index 956a3541c..4bebf8b68 100644 --- a/libs/flags/json_flag_test.go +++ b/libs/flags/json_flag_test.go @@ -1,7 +1,6 @@ package flags import ( - "fmt" "os" "path" "testing" @@ -68,7 +67,7 @@ func TestJsonFlagFile(t *testing.T) { fpath = f.Name() } - err := body.Set(fmt.Sprintf("@%s", fpath)) + err := body.Set("@" + fpath) require.NoError(t, err) diags := body.Unmarshal(&request) diff --git a/libs/flags/output.go b/libs/flags/output.go index 17da144bd..e0c799131 100644 --- a/libs/flags/output.go +++ b/libs/flags/output.go @@ -1,6 +1,7 @@ package flags import ( + "errors" "fmt" "strings" @@ -25,7 +26,7 @@ func (f *Output) Set(s string) error { case `json`, `text`: *f = Output(lower) default: - return fmt.Errorf("accepted arguments are json and text") + return errors.New("accepted arguments are json and text") } return nil } diff --git a/libs/git/reference.go b/libs/git/reference.go index e1126d4f2..6001d70de 100644 --- a/libs/git/reference.go +++ b/libs/git/reference.go @@ -13,8 +13,8 @@ import ( type ReferenceType string var ( - ErrNotAReferencePointer = fmt.Errorf("HEAD does not point to another reference") - ErrNotABranch = fmt.Errorf("HEAD is not a reference to a git branch") + ErrNotAReferencePointer = errors.New("HEAD does not point to another reference") + ErrNotABranch = errors.New("HEAD is not a reference to a git branch") ) const ( diff --git a/libs/git/repository_test.go b/libs/git/repository_test.go index 857df65a9..58a540190 100644 --- a/libs/git/repository_test.go +++ b/libs/git/repository_test.go @@ -1,7 +1,6 @@ package git import ( - "fmt" "os" "path/filepath" "strings" @@ -96,8 +95,7 @@ func (testRepo *testRepository) addOriginUrl(url string) { defer f.Close() _, err = f.WriteString( - fmt.Sprintf(`[remote "origin"] - url = %s`, url)) + "[remote \"origin\"]\n\turl = " + url) require.NoError(testRepo.t, err) // reload config to reflect the remote url diff --git 
a/libs/git/worktree_test.go b/libs/git/worktree_test.go index 3d620c483..072a9d348 100644 --- a/libs/git/worktree_test.go +++ b/libs/git/worktree_test.go @@ -53,12 +53,12 @@ func TestWorktreeResolveGitDir(t *testing.T) { writeGitCommonDir(t, dir, "../..") t.Run("relative", func(t *testing.T) { - writeGitDir(t, dir, fmt.Sprintf("gitdir: %s", "../.git/worktrees/my_worktree")) + writeGitDir(t, dir, "gitdir: "+"../.git/worktrees/my_worktree") verifyCorrectDirs(t, dir) }) t.Run("absolute", func(t *testing.T) { - writeGitDir(t, dir, fmt.Sprintf("gitdir: %s", filepath.Join(dir, ".git/worktrees/my_worktree"))) + writeGitDir(t, dir, "gitdir: "+filepath.Join(dir, ".git/worktrees/my_worktree")) verifyCorrectDirs(t, dir) }) @@ -77,7 +77,7 @@ func TestWorktreeResolveGitDir(t *testing.T) { func TestWorktreeResolveCommonDir(t *testing.T) { dir := setupWorktree(t) - writeGitDir(t, dir, fmt.Sprintf("gitdir: %s", "../.git/worktrees/my_worktree")) + writeGitDir(t, dir, "gitdir: "+"../.git/worktrees/my_worktree") t.Run("relative", func(t *testing.T) { writeGitCommonDir(t, dir, "../..") diff --git a/libs/jsonschema/instance.go b/libs/jsonschema/instance.go index 4440a2fe2..eb36822a0 100644 --- a/libs/jsonschema/instance.go +++ b/libs/jsonschema/instance.go @@ -2,6 +2,7 @@ package jsonschema import ( "encoding/json" + "errors" "fmt" "os" "slices" @@ -149,7 +150,7 @@ func (s *Schema) validateAnyOf(instance map[string]any) error { // According to the JSON schema RFC, anyOf must contain at least one schema. // https://json-schema.org/draft/2020-12/json-schema-core if len(s.AnyOf) == 0 { - return fmt.Errorf("anyOf must contain at least one schema") + return errors.New("anyOf must contain at least one schema") } for _, anyOf := range s.AnyOf { @@ -158,5 +159,5 @@ func (s *Schema) validateAnyOf(instance map[string]any) error { return nil } } - return fmt.Errorf("instance does not match any of the schemas in anyOf") + return errors.New("instance does not match any of the schemas in anyOf") } diff --git a/libs/jsonschema/utils.go b/libs/jsonschema/utils.go index ff9b88312..bc9339cae 100644 --- a/libs/jsonschema/utils.go +++ b/libs/jsonschema/utils.go @@ -150,7 +150,7 @@ func (e patternMatchError) Error() string { // If custom user error message is defined, return error with the custom message msg := e.FailureMessage if msg == "" { - msg = fmt.Sprintf("Expected to match regex pattern: %s", e.Pattern) + msg = "Expected to match regex pattern: " + e.Pattern } return fmt.Sprintf("invalid value for %s: %q. %s", e.PropertyName, e.PropertyValue, msg) } diff --git a/libs/locker/locker.go b/libs/locker/locker.go index eb59c9f74..aadc50b58 100644 --- a/libs/locker/locker.go +++ b/libs/locker/locker.go @@ -116,14 +116,14 @@ func (locker *Locker) assertLockHeld(ctx context.Context) error { // idempotent function since overwrite is set to true func (locker *Locker) Write(ctx context.Context, pathToFile string, content []byte) error { if !locker.Active { - return fmt.Errorf("failed to put file. deploy lock not held") + return errors.New("failed to put file. deploy lock not held") } return locker.filer.Write(ctx, pathToFile, bytes.NewReader(content), filer.OverwriteIfExists, filer.CreateParentDirectories) } func (locker *Locker) Read(ctx context.Context, path string) (io.ReadCloser, error) { if !locker.Active { - return nil, fmt.Errorf("failed to get file. deploy lock not held") + return nil, errors.New("failed to get file. 
deploy lock not held") } return locker.filer.Read(ctx, path) } @@ -173,7 +173,7 @@ func (locker *Locker) Lock(ctx context.Context, isForced bool) error { func (locker *Locker) Unlock(ctx context.Context, opts ...UnlockOption) error { if !locker.Active { - return fmt.Errorf("unlock called when lock is not held") + return errors.New("unlock called when lock is not held") } // if allowLockFileNotExist is set, do not throw an error if the lock file does diff --git a/libs/process/background_test.go b/libs/process/background_test.go index 7843375cf..5cc810f5d 100644 --- a/libs/process/background_test.go +++ b/libs/process/background_test.go @@ -4,7 +4,7 @@ import ( "bufio" "bytes" "context" - "fmt" + "errors" "os/exec" "strings" "testing" @@ -101,7 +101,7 @@ func TestBackgroundFails(t *testing.T) { func TestBackgroundFailsOnOption(t *testing.T) { ctx := context.Background() _, err := Background(ctx, []string{"ls", "/dev/null/x"}, func(_ context.Context, c *exec.Cmd) error { - return fmt.Errorf("nope") + return errors.New("nope") }) assert.EqualError(t, err, "nope") } diff --git a/libs/process/stub.go b/libs/process/stub.go index 8ab6fd705..528489098 100644 --- a/libs/process/stub.go +++ b/libs/process/stub.go @@ -168,7 +168,7 @@ func (s *processStub) run(cmd *exec.Cmd) error { } var zeroStub reponseStub if s.reponseStub == zeroStub { - return fmt.Errorf("no default process stub") + return errors.New("no default process stub") } err := s.reponseStub.err if s.reponseStub.stdout != "" { diff --git a/libs/process/stub_test.go b/libs/process/stub_test.go index 81afa3a89..158e8b3a6 100644 --- a/libs/process/stub_test.go +++ b/libs/process/stub_test.go @@ -2,7 +2,7 @@ package process_test import ( "context" - "fmt" + "errors" "os/exec" "testing" @@ -32,7 +32,7 @@ func TestStubOutput(t *testing.T) { func TestStubFailure(t *testing.T) { ctx := context.Background() ctx, stub := process.WithStub(ctx) - stub.WithFailure(fmt.Errorf("nope")) + stub.WithFailure(errors.New("nope")) _, err := process.Background(ctx, []string{"/bin/meeecho", "1"}) require.EqualError(t, err, "/bin/meeecho 1: nope") @@ -51,7 +51,7 @@ func TestStubCallback(t *testing.T) { if err != nil { return err } - return fmt.Errorf("yep") + return errors.New("yep") }) _, err := process.Background(ctx, []string{"/bin/meeecho", "1"}) @@ -70,7 +70,7 @@ func TestStubResponses(t *testing.T) { stub. WithStdoutFor("qux 1", "first"). WithStdoutFor("qux 2", "second"). 
- WithFailureFor("qux 3", fmt.Errorf("nope")) + WithFailureFor("qux 3", errors.New("nope")) first, err := process.Background(ctx, []string{"/path/is/irrelevant/qux", "1"}) require.NoError(t, err) diff --git a/libs/sync/event.go b/libs/sync/event.go index 05821a477..510a01954 100644 --- a/libs/sync/event.go +++ b/libs/sync/event.go @@ -52,10 +52,10 @@ func (e *EventChanges) IsEmpty() bool { func (e *EventChanges) String() string { var changes []string if len(e.Put) > 0 { - changes = append(changes, fmt.Sprintf("PUT: %s", strings.Join(e.Put, ", "))) + changes = append(changes, "PUT: "+strings.Join(e.Put, ", ")) } if len(e.Delete) > 0 { - changes = append(changes, fmt.Sprintf("DELETE: %s", strings.Join(e.Delete, ", "))) + changes = append(changes, "DELETE: "+strings.Join(e.Delete, ", ")) } return strings.Join(changes, ", ") } @@ -70,7 +70,7 @@ func (e *EventStart) String() string { return "" } - return fmt.Sprintf("Action: %s", e.EventChanges.String()) + return "Action: " + e.EventChanges.String() } func newEventStart(seq int, put, delete []string) Event { @@ -98,9 +98,9 @@ func (e *EventSyncProgress) String() string { switch e.Action { case EventActionPut: - return fmt.Sprintf("Uploaded %s", e.Path) + return "Uploaded " + e.Path case EventActionDelete: - return fmt.Sprintf("Deleted %s", e.Path) + return "Deleted " + e.Path default: panic("invalid action") } diff --git a/libs/sync/path.go b/libs/sync/path.go index 97a908965..87397be4b 100644 --- a/libs/sync/path.go +++ b/libs/sync/path.go @@ -14,7 +14,7 @@ import ( ) func repoPathForPath(me *iam.User, remotePath string) string { - base := path.Clean(fmt.Sprintf("/Repos/%s", me.UserName)) + base := path.Clean("/Repos/" + me.UserName) remotePath = path.Clean(remotePath) for strings.HasPrefix(path.Dir(remotePath), base) && path.Dir(remotePath) != base { remotePath = path.Dir(remotePath) diff --git a/libs/sync/sync.go b/libs/sync/sync.go index dc2c8992a..f13fa934a 100644 --- a/libs/sync/sync.go +++ b/libs/sync/sync.go @@ -2,6 +2,7 @@ package sync import ( "context" + "errors" "fmt" stdsync "sync" "time" @@ -93,7 +94,7 @@ func New(ctx context.Context, opts SyncOptions) (*Sync, error) { // specify the workspace by its resource ID. tracked in: https://databricks.atlassian.net/browse/DECO-194 opts.Host = opts.WorkspaceClient.Config.Host if opts.Host == "" { - return nil, fmt.Errorf("failed to resolve host for snapshot") + return nil, errors.New("failed to resolve host for snapshot") } // For full sync, we start with an empty snapshot. 
diff --git a/libs/tags/tag.go b/libs/tags/tag.go index 4e9b329ca..64eab947e 100644 --- a/libs/tags/tag.go +++ b/libs/tags/tag.go @@ -1,6 +1,7 @@ package tags import ( + "errors" "fmt" "regexp" "strings" @@ -21,13 +22,13 @@ type tag struct { func (t *tag) ValidateKey(s string) error { if len(s) == 0 { - return fmt.Errorf("key must not be empty") + return errors.New("key must not be empty") } if len(s) > t.keyLength { return fmt.Errorf("key length %d exceeds maximum of %d", len(s), t.keyLength) } if strings.ContainsFunc(s, func(r rune) bool { return !unicode.Is(latin1, r) }) { - return fmt.Errorf("key contains non-latin1 characters") + return errors.New("key contains non-latin1 characters") } if !t.keyPattern.MatchString(s) { return fmt.Errorf("key %q does not match pattern %q", s, t.keyPattern) @@ -40,7 +41,7 @@ func (t *tag) ValidateValue(s string) error { return fmt.Errorf("value length %d exceeds maximum of %d", len(s), t.valueLength) } if strings.ContainsFunc(s, func(r rune) bool { return !unicode.Is(latin1, r) }) { - return fmt.Errorf("value contains non-latin1 characters") + return errors.New("value contains non-latin1 characters") } if !t.valuePattern.MatchString(s) { return fmt.Errorf("value %q does not match pattern %q", s, t.valuePattern) diff --git a/libs/template/config.go b/libs/template/config.go index 8e7695b91..919ba2250 100644 --- a/libs/template/config.go +++ b/libs/template/config.go @@ -189,7 +189,7 @@ func (c *config) promptOnce(property *jsonschema.Schema, name, defaultVal, descr c.values[name], err = property.ParseString(userInput) if err != nil { // Show error and retry if validation fails - cmdio.LogString(c.ctx, fmt.Sprintf("Validation failed: %s", err.Error())) + cmdio.LogString(c.ctx, "Validation failed: "+err.Error()) return retriableError{err: err} } @@ -197,7 +197,7 @@ func (c *config) promptOnce(property *jsonschema.Schema, name, defaultVal, descr err = c.schema.ValidateInstance(c.values) if err != nil { // Show error and retry if validation fails - cmdio.LogString(c.ctx, fmt.Sprintf("Validation failed: %s", err.Error())) + cmdio.LogString(c.ctx, "Validation failed: "+err.Error()) return retriableError{err: err} } return nil diff --git a/libs/template/file_test.go b/libs/template/file_test.go index ced38c284..f4bf5652c 100644 --- a/libs/template/file_test.go +++ b/libs/template/file_test.go @@ -8,6 +8,7 @@ import ( "runtime" "testing" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -27,8 +28,8 @@ func testInMemoryFile(t *testing.T, ctx context.Context, perm fs.FileMode) { err = f.Write(ctx, out) assert.NoError(t, err) - assertFileContent(t, filepath.Join(tmpDir, "a/b/c"), "123") - assertFilePermissions(t, filepath.Join(tmpDir, "a/b/c"), perm) + testutil.AssertFileContents(t, filepath.Join(tmpDir, "a/b/c"), "123") + testutil.AssertFilePermissions(t, filepath.Join(tmpDir, "a/b/c"), perm) } func testCopyFile(t *testing.T, ctx context.Context, perm fs.FileMode) { @@ -48,8 +49,8 @@ func testCopyFile(t *testing.T, ctx context.Context, perm fs.FileMode) { err = f.Write(ctx, out) assert.NoError(t, err) - assertFileContent(t, filepath.Join(tmpDir, "a/b/c"), "qwerty") - assertFilePermissions(t, filepath.Join(tmpDir, "a/b/c"), perm) + testutil.AssertFileContents(t, filepath.Join(tmpDir, "source"), "qwerty") + testutil.AssertFilePermissions(t, filepath.Join(tmpDir, "source"), perm) } func TestTemplateInMemoryFilePersistToDisk(t *testing.T) { diff --git 
a/libs/template/materialize_test.go b/libs/template/materialize_test.go new file mode 100644 index 000000000..c9331b43f --- /dev/null +++ b/libs/template/materialize_test.go @@ -0,0 +1,23 @@ +package template + +import ( + "context" + "os" + "testing" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/databricks-sdk-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMaterializeForNonTemplateDirectory(t *testing.T) { + tmpDir := t.TempDir() + w, err := databricks.NewWorkspaceClient(&databricks.Config{}) + require.NoError(t, err) + ctx := root.SetWorkspaceClient(context.Background(), w) + + // Try to materialize a non-template directory. + err = Materialize(ctx, "", os.DirFS(tmpDir), nil) + assert.EqualError(t, err, "not a bundle template: expected to find a template schema file at "+schemaFileName) +} diff --git a/libs/template/renderer.go b/libs/template/renderer.go index 5030cd9df..679b7d8b7 100644 --- a/libs/template/renderer.go +++ b/libs/template/renderer.go @@ -150,6 +150,10 @@ func (r *renderer) computeFile(relPathTemplate string) (file, error) { } perm := info.Mode().Perm() + // Always include the write bit for the owner of the file. + // It does not make sense to have a file that is not writable by the owner. + perm |= 0o200 + // Execute relative path template to get destination path for the file relPath, err := r.executeTemplate(relPathTemplate) if err != nil { diff --git a/libs/template/renderer_test.go b/libs/template/renderer_test.go index 2c14009ff..b2ec388bd 100644 --- a/libs/template/renderer_test.go +++ b/libs/template/renderer_test.go @@ -2,7 +2,6 @@ package template import ( "context" - "fmt" "io/fs" "os" "path" @@ -17,6 +16,7 @@ import ( "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/dbr" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/tags" @@ -27,20 +27,23 @@ import ( "github.com/stretchr/testify/require" ) -func assertFileContent(t *testing.T, path, content string) { - b, err := os.ReadFile(path) - require.NoError(t, err) - assert.Equal(t, content, string(b)) -} +var ( + defaultFilePermissions fs.FileMode + defaultDirPermissions fs.FileMode +) -func assertFilePermissions(t *testing.T, path string, perm fs.FileMode) { - info, err := os.Stat(path) - require.NoError(t, err) - assert.Equal(t, perm, info.Mode().Perm()) +func init() { + if runtime.GOOS == "windows" { + defaultFilePermissions = fs.FileMode(0o666) + defaultDirPermissions = fs.FileMode(0o777) + } else { + defaultFilePermissions = fs.FileMode(0o644) + defaultDirPermissions = fs.FileMode(0o755) + } } func assertBuiltinTemplateValid(t *testing.T, template string, settings map[string]any, target string, isServicePrincipal, build bool, tempDir string) { - ctx := context.Background() + ctx := dbr.MockRuntime(context.Background(), false) templateFS, err := fs.Sub(builtinTemplates, path.Join("templates", template)) require.NoError(t, err) @@ -69,6 +72,10 @@ func assertBuiltinTemplateValid(t *testing.T, template string, settings map[stri err = renderer.persistToDisk(ctx, out) require.NoError(t, err) + // Verify permissions on file and directory + testutil.AssertFilePermissions(t, filepath.Join(tempDir, "my_project/README.md"), defaultFilePermissions) + testutil.AssertDirPermissions(t, filepath.Join(tempDir, "my_project/resources"), defaultDirPermissions) + b, err := bundle.Load(ctx, 
filepath.Join(tempDir, "my_project")) require.NoError(t, err) diags := bundle.Apply(ctx, b, phases.LoadNamedTarget(target)) @@ -347,10 +354,10 @@ func TestRendererPersistToDisk(t *testing.T) { assert.NoFileExists(t, filepath.Join(tmpDir, "a", "b", "c")) assert.NoFileExists(t, filepath.Join(tmpDir, "mno")) - assertFileContent(t, filepath.Join(tmpDir, "a", "b", "d"), "123") - assertFilePermissions(t, filepath.Join(tmpDir, "a", "b", "d"), 0o444) - assertFileContent(t, filepath.Join(tmpDir, "mmnn"), "456") - assertFilePermissions(t, filepath.Join(tmpDir, "mmnn"), 0o444) + testutil.AssertFileContents(t, filepath.Join(tmpDir, "a/b/d"), "123") + testutil.AssertFilePermissions(t, filepath.Join(tmpDir, "a/b/d"), fs.FileMode(0o444)) + testutil.AssertFileContents(t, filepath.Join(tmpDir, "mmnn"), "456") + testutil.AssertFilePermissions(t, filepath.Join(tmpDir, "mmnn"), fs.FileMode(0o444)) } func TestRendererWalk(t *testing.T) { @@ -544,7 +551,7 @@ func TestRendererErrorOnConflictingFile(t *testing.T) { out, err := filer.NewLocalClient(tmpDir) require.NoError(t, err) err = r.persistToDisk(ctx, out) - assert.EqualError(t, err, fmt.Sprintf("failed to initialize template, one or more files already exist: %s", "a")) + assert.EqualError(t, err, "failed to initialize template, one or more files already exist: "+"a") } func TestRendererNoErrorOnConflictingFileIfSkipped(t *testing.T) { @@ -617,8 +624,8 @@ func TestRendererFileTreeRendering(t *testing.T) { require.NoError(t, err) // Assert files and directories are correctly materialized. - assert.DirExists(t, filepath.Join(tmpDir, "my_directory")) - assert.FileExists(t, filepath.Join(tmpDir, "my_directory", "my_file")) + testutil.AssertDirPermissions(t, filepath.Join(tmpDir, "my_directory"), defaultDirPermissions) + testutil.AssertFilePermissions(t, filepath.Join(tmpDir, "my_directory", "my_file"), defaultFilePermissions) } func TestRendererSubTemplateInPath(t *testing.T) { diff --git a/libs/testdiff/golden.go b/libs/testdiff/golden.go index b67eb50a9..02213c88a 100644 --- a/libs/testdiff/golden.go +++ b/libs/testdiff/golden.go @@ -2,6 +2,7 @@ package testdiff import ( "context" + "flag" "fmt" "os" "regexp" @@ -16,7 +17,11 @@ import ( "github.com/stretchr/testify/assert" ) -var OverwriteMode = os.Getenv("TESTS_OUTPUT") == "OVERWRITE" +var OverwriteMode = false + +func init() { + flag.BoolVar(&OverwriteMode, "update", false, "Overwrite golden files") +} func ReadFile(t testutil.TestingT, ctx context.Context, filename string) string { t.Helper() diff --git a/libs/testdiff/testdiff.go b/libs/testdiff/testdiff.go index 71b781362..fef1d5ae2 100644 --- a/libs/testdiff/testdiff.go +++ b/libs/testdiff/testdiff.go @@ -25,7 +25,9 @@ func AssertEqualTexts(t testutil.TestingT, filename1, filename2, expected, out s } else { // only show diff for large texts diff := UnifiedDiff(filename1, filename2, expected, out) - t.Errorf("Diff:\n" + diff) + if diff != "" { + t.Errorf("Diff:\n" + diff) + } } }
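
The recurring Go-level change across the files above is mechanical: fmt.Sprint/fmt.Sprintf calls that only stitch a single value into a string become plain concatenation or strconv calls, and fmt.Errorf calls with no format verbs become errors.New. A minimal sketch of the before/after shapes, using illustrative identifiers (jobID, host) that are not taken from the diff:

package main

import (
	"errors"
	"fmt"
	"strconv"
)

func main() {
	jobID := 42
	host := "example.cloud.databricks.com"

	// Before: fmt-based formatting, even for trivial cases.
	before1 := fmt.Sprint(jobID)
	before2 := fmt.Sprintf("https://%s", host)
	beforeErr := fmt.Errorf("no matching config profiles found")

	// After: the cheaper equivalents this diff switches to
	// (the same rewrites a linter such as perfsprint suggests).
	after1 := strconv.Itoa(jobID)                                // fmt.Sprint(int) -> strconv.Itoa
	after2 := "https://" + host                                  // single-verb Sprintf -> concatenation
	afterErr := errors.New("no matching config profiles found")  // verb-less Errorf -> errors.New

	// The rewritten forms produce identical strings and error messages.
	fmt.Println(before1 == after1, before2 == after2, beforeErr.Error() == afterErr.Error())
}

Separately, the change to libs/testdiff/golden.go moves golden-file overwriting from the TESTS_OUTPUT=OVERWRITE environment variable to a registered -update flag, so refreshing golden files would presumably be done with something like: go test ./libs/testdiff/... -update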