diff --git a/.codegen.json b/.codegen.json index 73ab8c2a4..735e1ee31 100644 --- a/.codegen.json +++ b/.codegen.json @@ -11,7 +11,7 @@ "required": ["go"], "post_generate": [ "go test -timeout 240s -run TestConsistentDatabricksSdkVersion github.com/databricks/cli/internal/build", - "go run ./bundle/internal/schema/*.go ./bundle/schema/jsonschema.json", + "make schema", "echo 'bundle/internal/tf/schema/\\*.go linguist-generated=true' >> ./.gitattributes", "echo 'go.sum linguist-generated=true' >> ./.gitattributes", "echo 'bundle/schema/jsonschema.json linguist-generated=true' >> ./.gitattributes" diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index a2ba58aa5..8622b29ca 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -f2385add116e3716c8a90a0b68e204deb40f996c \ No newline at end of file +a6a317df8327c9b1e5cb59a03a42ffa2aabeef6d \ No newline at end of file diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index ef7977e1b..ee2c7b0fd 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -411,5 +411,5 @@ func new{{.PascalName}}() *cobra.Command { {{- define "request-body-obj" -}} {{- $method := .Method -}} {{- $field := .Field -}} - {{$method.CamelName}}Req{{ if (and $method.RequestBodyField (not $field.IsPath)) }}.{{$method.RequestBodyField.PascalName}}{{end}}.{{$field.PascalName}} + {{$method.CamelName}}Req{{ if (and $method.RequestBodyField (and (not $field.IsPath) (not $field.IsQuery))) }}.{{$method.RequestBodyField.PascalName}}{{end}}.{{$field.PascalName}} {{- end -}} diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 000000000..6304b3604 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,8 @@ +# Enable gofumpt and goimports in golangci-lint (#1999) +2e018cfaec200a02ee2bd5b389e7da3c6f15f460 + +# Enable errcheck everywhere and fix or silent remaining issues (#1987) +8d5351c1c3d7befda4baae5d6adb99367aa50b3c + +# Add error checking in tests and enable errcheck there (#1980) +1b2be1b2cb4b7909df2a8ad4cb6a0f43e8fcf0c6 diff --git a/.gitattributes b/.gitattributes index 2755c02d7..0a8ddf3cb 100755 --- a/.gitattributes +++ b/.gitattributes @@ -8,6 +8,7 @@ cmd/account/custom-app-integration/custom-app-integration.go linguist-generated= cmd/account/disable-legacy-features/disable-legacy-features.go linguist-generated=true cmd/account/encryption-keys/encryption-keys.go linguist-generated=true cmd/account/esm-enablement-account/esm-enablement-account.go linguist-generated=true +cmd/account/federation-policy/federation-policy.go linguist-generated=true cmd/account/groups/groups.go linguist-generated=true cmd/account/ip-access-lists/ip-access-lists.go linguist-generated=true cmd/account/log-delivery/log-delivery.go linguist-generated=true @@ -19,6 +20,7 @@ cmd/account/o-auth-published-apps/o-auth-published-apps.go linguist-generated=tr cmd/account/personal-compute/personal-compute.go linguist-generated=true cmd/account/private-access/private-access.go linguist-generated=true cmd/account/published-app-integration/published-app-integration.go linguist-generated=true +cmd/account/service-principal-federation-policy/service-principal-federation-policy.go linguist-generated=true cmd/account/service-principal-secrets/service-principal-secrets.go linguist-generated=true cmd/account/service-principals/service-principals.go linguist-generated=true cmd/account/settings/settings.go linguist-generated=true @@ -37,6 +39,9 @@ cmd/workspace/apps/apps.go linguist-generated=true 
cmd/workspace/artifact-allowlists/artifact-allowlists.go linguist-generated=true cmd/workspace/automatic-cluster-update/automatic-cluster-update.go linguist-generated=true cmd/workspace/catalogs/catalogs.go linguist-generated=true +cmd/workspace/clean-room-assets/clean-room-assets.go linguist-generated=true +cmd/workspace/clean-room-task-runs/clean-room-task-runs.go linguist-generated=true +cmd/workspace/clean-rooms/clean-rooms.go linguist-generated=true cmd/workspace/cluster-policies/cluster-policies.go linguist-generated=true cmd/workspace/clusters/clusters.go linguist-generated=true cmd/workspace/cmd.go linguist-generated=true diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 000000000..76835de7d --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +* @pietern @andrewnester @shreyas-goenka @denik diff --git a/.github/dependabot.yml b/.github/dependabot.yml index f1b219b47..e7d7ad6b6 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,3 +4,7 @@ updates: directory: "/" schedule: interval: "weekly" + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "monthly" diff --git a/.github/workflows/close-stale-issues.yml b/.github/workflows/close-stale-issues.yml new file mode 100644 index 000000000..7bf754319 --- /dev/null +++ b/.github/workflows/close-stale-issues.yml @@ -0,0 +1,38 @@ +name: "Close Stale Issues" + +on: + workflow_dispatch: + schedule: + - cron: "0 0 * * *" # Run at midnight every day + +jobs: + cleanup: + name: Stale issue job + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco + + permissions: + issues: write + contents: read + pull-requests: write + + steps: + - uses: actions/stale@v9 + with: + stale-issue-message: This issue has not received a response in a while. If you want to keep this issue open, please leave a comment below and auto-close will be canceled. + stale-pr-message: This PR has not received an update in a while. If you want to keep this PR open, please leave a comment below or push a new commit and auto-close will be canceled. + + # These labels are required + stale-issue-label: Stale + stale-pr-label: Stale + + exempt-issue-labels: No Autoclose + exempt-pr-labels: No Autoclose + + # Issue timing + days-before-stale: 60 + days-before-close: 30 + + repo-token: ${{ secrets.GITHUB_TOKEN }} + loglevel: DEBUG diff --git a/.github/workflows/external-message.yml b/.github/workflows/external-message.yml index 1970735f9..f06d81a47 100644 --- a/.github/workflows/external-message.yml +++ b/.github/workflows/external-message.yml @@ -13,10 +13,17 @@ on: jobs: comment-on-pr: - runs-on: ubuntu-latest + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco + permissions: pull-requests: write + # Only run this job for PRs from forks. + # Integration tests are not run automatically for PRs from forks. 
+ if: "${{ github.event.pull_request.head.repo.fork }}" + steps: - uses: actions/checkout@v4 @@ -43,7 +50,7 @@ jobs: run: | gh pr comment ${{ github.event.pull_request.number }} --body \ " - If integration tests don't run automatically, an authorized user can run them manually by following the instructions below: + An authorized user can trigger integration tests manually by following the instructions below: Trigger: [go/deco-tests-run/cli](https://go/deco-tests-run/cli) diff --git a/.github/workflows/integration-approve.yml b/.github/workflows/integration-approve.yml new file mode 100644 index 000000000..293d31a2a --- /dev/null +++ b/.github/workflows/integration-approve.yml @@ -0,0 +1,34 @@ +name: integration-approve + +on: + merge_group: + +jobs: + # Trigger for merge groups. + # + # Statuses and checks apply to specific commits (by hash). + # Enforcement of required checks is done both at the PR level and the merge queue level. + # In case of multiple commits in a single PR, the hash of the squashed commit + # will not match the one for the latest (approved) commit in the PR. + # + # We auto approve the check for the merge queue for two reasons: + # + # * Queue times out due to duration of tests. + # * Avoid running integration tests twice, since it was already run at the tip of the branch before squashing. + # + trigger: + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco + + steps: + - name: Auto-approve squashed commit + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + shell: bash + run: | + gh api -X POST -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + /repos/${{ github.repository }}/statuses/${{ github.sha }} \ + -f 'state=success' \ + -f 'context=Integration Tests Check' diff --git a/.github/workflows/integration-main.yml b/.github/workflows/integration-main.yml new file mode 100644 index 000000000..0b6032d50 --- /dev/null +++ b/.github/workflows/integration-main.yml @@ -0,0 +1,36 @@ +name: integration-main + +on: + push: + branches: + - main + +jobs: + # Trigger for pushes to the main branch. + # + # This workflow triggers the integration test workflow in a different repository. + # It requires secrets from the "test-trigger-is" environment, which are only available to authorized users. + trigger: + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco + + environment: "test-trigger-is" + + steps: + - name: Generate GitHub App Token + id: generate-token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }} + private-key: ${{ secrets.DECO_WORKFLOW_TRIGGER_PRIVATE_KEY }} + owner: ${{ secrets.ORG_NAME }} + repositories: ${{secrets.REPO_NAME}} + + - name: Trigger Workflow in Another Repo + env: + GH_TOKEN: ${{ steps.generate-token.outputs.token }} + run: | + gh workflow run cli-isolated-nightly.yml -R ${{ secrets.ORG_NAME }}/${{secrets.REPO_NAME}} \ + --ref main \ + -f commit_sha=${{ github.event.after }} diff --git a/.github/workflows/integration-pr.yml b/.github/workflows/integration-pr.yml new file mode 100644 index 000000000..0f9c4797a --- /dev/null +++ b/.github/workflows/integration-pr.yml @@ -0,0 +1,40 @@ +name: integration-pr + +on: + pull_request: + types: [opened, synchronize] + +jobs: + # Trigger for pull requests. + # + # This workflow triggers the integration test workflow in a different repository. + # It requires secrets from the "test-trigger-is" environment, which are only available to authorized users. 
+ trigger: + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco + + environment: "test-trigger-is" + + # Only run this job for PRs from branches on the main repository and not from forks. + # Workflows triggered by PRs from forks don't have access to the "test-trigger-is" environment. + if: "${{ !github.event.pull_request.head.repo.fork }}" + + steps: + - name: Generate GitHub App Token + id: generate-token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }} + private-key: ${{ secrets.DECO_WORKFLOW_TRIGGER_PRIVATE_KEY }} + owner: ${{ secrets.ORG_NAME }} + repositories: ${{secrets.REPO_NAME}} + + - name: Trigger Workflow in Another Repo + env: + GH_TOKEN: ${{ steps.generate-token.outputs.token }} + run: | + gh workflow run cli-isolated-pr.yml -R ${{ secrets.ORG_NAME }}/${{secrets.REPO_NAME}} \ + --ref main \ + -f pull_request_number=${{ github.event.pull_request.number }} \ + -f commit_sha=${{ github.event.pull_request.head.sha }} diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml deleted file mode 100644 index d56728c28..000000000 --- a/.github/workflows/integration-tests.yml +++ /dev/null @@ -1,78 +0,0 @@ -name: integration - -on: - - pull_request: - types: [opened, synchronize] - - merge_group: - - -jobs: - check-token: - runs-on: ubuntu-latest - environment: "test-trigger-is" - outputs: - has_token: ${{ steps.set-token-status.outputs.has_token }} - steps: - - name: Check if DECO_WORKFLOW_TRIGGER_APP_ID is set - id: set-token-status - run: | - if [ -z "${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}" ]; then - echo "DECO_WORKFLOW_TRIGGER_APP_ID is empty. User has no access to secrets." - echo "::set-output name=has_token::false" - else - echo "DECO_WORKFLOW_TRIGGER_APP_ID is set. User has access to secrets." - echo "::set-output name=has_token::true" - fi - - trigger-tests: - runs-on: ubuntu-latest - needs: check-token - if: github.event_name == 'pull_request' && needs.check-token.outputs.has_token == 'true' - environment: "test-trigger-is" - - steps: - - uses: actions/checkout@v4 - - - name: Generate GitHub App Token - id: generate-token - uses: actions/create-github-app-token@v1 - with: - app-id: ${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }} - private-key: ${{ secrets.DECO_WORKFLOW_TRIGGER_PRIVATE_KEY }} - owner: ${{ secrets.ORG_NAME }} - repositories: ${{secrets.REPO_NAME}} - - - name: Trigger Workflow in Another Repo - env: - GH_TOKEN: ${{ steps.generate-token.outputs.token }} - run: | - gh workflow run cli-isolated-pr.yml -R ${{ secrets.ORG_NAME }}/${{secrets.REPO_NAME}} \ - --ref main \ - -f pull_request_number=${{ github.event.pull_request.number }} \ - -f commit_sha=${{ github.event.pull_request.head.sha }} - - - - # Statuses and checks apply to specific commits (by hash). - # Enforcement of required checks is done both at the PR level and the merge queue level. - # In case of multiple commits in a single PR, the hash of the squashed commit - # will not match the one for the latest (approved) commit in the PR. - # We auto approve the check for the merge queue for two reasons: - # * Queue times out due to duration of tests. - # * Avoid running integration tests twice, since it was already run at the tip of the branch before squashing. 
- auto-approve: - if: github.event_name == 'merge_group' - runs-on: ubuntu-latest - steps: - - name: Mark Check - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - shell: bash - run: | - gh api -X POST -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - /repos/${{ github.repository }}/statuses/${{ github.sha }} \ - -f 'state=success' \ - -f 'context=Integration Tests Check' diff --git a/.github/workflows/publish-winget.yml b/.github/workflows/publish-winget.yml index 19603e669..267077102 100644 --- a/.github/workflows/publish-winget.yml +++ b/.github/workflows/publish-winget.yml @@ -2,11 +2,19 @@ name: publish-winget on: workflow_dispatch: + inputs: + tag: + description: 'Tag to publish' + default: '' jobs: publish-to-winget-pkgs: - runs-on: windows-latest + runs-on: + group: databricks-protected-runner-group + labels: windows-server-latest + environment: release + steps: - uses: vedantmgoyal2009/winget-releaser@93fd8b606a1672ec3e5c6c3bb19426be68d1a8b0 # https://github.com/vedantmgoyal2009/winget-releaser/releases/tag/v2 with: @@ -14,3 +22,7 @@ jobs: installers-regex: 'windows_.*-signed\.zip$' # Only signed Windows releases token: ${{ secrets.ENG_DEV_ECOSYSTEM_BOT_TOKEN }} fork-user: eng-dev-ecosystem-bot + + # Use the tag from the input, or the ref name if the input is not provided. + # The ref name is equal to the tag name when this workflow is triggered by the "sign-cli" command. + release-tag: ${{ inputs.tag || github.ref_name }} diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index ebb3e75d4..ddb2fb002 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -13,9 +13,26 @@ on: # seed the build cache. branches: - main + schedule: + - cron: '0 0,12 * * *' # Runs at 00:00 and 12:00 UTC daily + +env: + GOTESTSUM_FORMAT: github-actions jobs: + cleanups: + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco + steps: + - name: Clean up cache if running on schedule + if: ${{ github.event_name == 'schedule' }} + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: gh cache delete --all --repo databricks/cli || true + tests: + needs: cleanups runs-on: ${{ matrix.os }} strategy: @@ -33,18 +50,21 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.23.2 + go-version: 1.23.4 - name: Setup Python uses: actions/setup-python@v5 with: python-version: '3.9' + - name: Install uv + uses: astral-sh/setup-uv@v5 + - name: Set go env run: | echo "GOPATH=$(go env GOPATH)" >> $GITHUB_ENV echo "$(go env GOPATH)/bin" >> $GITHUB_PATH - go install gotest.tools/gotestsum@latest + go install gotest.tools/gotestsum@v1.12.0 - name: Pull external libraries run: | @@ -52,58 +72,36 @@ jobs: pip3 install wheel - name: Run tests - run: make testonly - - - name: Publish test coverage - uses: codecov/codecov-action@v4 - - fmt: - runs-on: ubuntu-latest - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Setup Go - uses: actions/setup-go@v5 - with: - go-version: 1.23.2 - - # No need to download cached dependencies when running gofmt. - cache: false - - - name: Install goimports - run: | - go install golang.org/x/tools/cmd/goimports@latest - - - name: Run make fmt - run: | - make fmt - - - name: Run go mod tidy - run: | - go mod tidy - - - name: Fail on differences - run: | - # Exit with status code 1 if there are differences (i.e. 
unformatted files) - git diff --exit-code + run: make test golangci: + needs: cleanups name: lint runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: - go-version: 1.23.2 + go-version: 1.23.4 + # Use different schema from regular job, to avoid overwriting the same key + cache-dependency-path: | + go.sum + .golangci.yaml + - name: Run go mod tidy + run: | + go mod tidy + - name: Fail on differences + run: | + # Exit with status code 1 if there are differences (i.e. unformatted files) + git diff --exit-code - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.62.2 + version: v1.63.4 args: --timeout=15m validate-bundle-schema: + needs: cleanups runs-on: ubuntu-latest steps: @@ -113,7 +111,18 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.23.2 + go-version: 1.23.4 + # Use different schema from regular job, to avoid overwriting the same key + cache-dependency-path: | + go.sum + bundle/internal/schema/*.* + + - name: Verify that the schema is up to date + run: | + if ! ( make schema && git diff --exit-code ); then + echo "The schema is not up to date. Please run 'make schema' and commit the changes." + exit 1 + fi # Github repo: https://github.com/ajv-validator/ajv-cli - name: Install ajv-cli @@ -124,14 +133,19 @@ jobs: # By default the ajv-cli runs in strict mode which will fail if the schema # itself is not valid. Strict mode is more strict than the JSON schema # specification. See for details: https://ajv.js.org/options.html#strict-mode-options + # The ajv-cli is configured to use the markdownDescription keyword which is not part of the JSON schema specification, + # but is used in editors like VSCode to render markdown in the description field - name: Validate bundle schema run: | go run main.go bundle schema > schema.json + # Add markdownDescription keyword to ajv + echo "module.exports=function(a){a.addKeyword('markdownDescription')}" >> keywords.js + for file in ./bundle/internal/schema/testdata/pass/*.yml; do - ajv test -s schema.json -d $file --valid + ajv test -s schema.json -d $file --valid -c=./keywords.js done for file in ./bundle/internal/schema/testdata/fail/*.yml; do - ajv test -s schema.json -d $file --invalid + ajv test -s schema.json -d $file --invalid -c=./keywords.js done diff --git a/.github/workflows/release-snapshot.yml b/.github/workflows/release-snapshot.yml index 4a7597dc0..5c56a294e 100644 --- a/.github/workflows/release-snapshot.yml +++ b/.github/workflows/release-snapshot.yml @@ -20,7 +20,10 @@ on: jobs: goreleaser: - runs-on: ubuntu-latest + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco + steps: - name: Checkout repository and submodules uses: actions/checkout@v4 @@ -31,7 +34,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.23.2 + go-version: 1.23.4 # The default cache key for this action considers only the `go.sum` file. 
# We include .goreleaser.yaml here to differentiate from the cache used by the push action diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index e8f59f9b8..061688506 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -9,9 +9,13 @@ on: jobs: goreleaser: + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco + outputs: artifacts: ${{ steps.releaser.outputs.artifacts }} - runs-on: ubuntu-latest + steps: - name: Checkout repository and submodules uses: actions/checkout@v4 @@ -22,7 +26,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.23.2 + go-version: 1.23.4 # The default cache key for this action considers only the `go.sum` file. # We include .goreleaser.yaml here to differentiate from the cache used by the push action @@ -54,8 +58,12 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} create-setup-cli-release-pr: + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco + needs: goreleaser - runs-on: ubuntu-latest + steps: - name: Set VERSION variable from tag run: | @@ -78,8 +86,12 @@ jobs: }); create-homebrew-tap-release-pr: + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco + needs: goreleaser - runs-on: ubuntu-latest + steps: - name: Set VERSION variable from tag run: | @@ -115,8 +127,12 @@ jobs: }); create-vscode-extension-update-pr: + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco + needs: goreleaser - runs-on: ubuntu-latest + steps: - name: Set VERSION variable from tag run: | diff --git a/.golangci.yaml b/.golangci.yaml index 82e4d9848..07a6afdc5 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -2,21 +2,46 @@ linters: disable-all: true enable: - bodyclose - # errcheck and govet are part of default setup and should be included but give too many errors now - # once errors are fixed, they should be enabled here: - #- errcheck + - errcheck - gosimple - #- govet + - govet - ineffassign - staticcheck - unused - gofmt + - gofumpt + - goimports + - testifylint + - intrange + - mirror + - perfsprint linters-settings: + govet: + enable-all: true + disable: + - fieldalignment + - shadow gofmt: rewrite-rules: - pattern: 'a[b:len(a)]' replacement: 'a[b:]' - pattern: 'interface{}' replacement: 'any' + errcheck: + exclude-functions: + - (*github.com/spf13/cobra.Command).RegisterFlagCompletionFunc + - (*github.com/spf13/cobra.Command).MarkFlagRequired + - (*github.com/spf13/pflag.FlagSet).MarkDeprecated + - (*github.com/spf13/pflag.FlagSet).MarkHidden + gofumpt: + module-path: github.com/databricks/cli + extra-rules: true + testifylint: + enable-all: true + disable: + # good check, but we have too many assert.(No)?Errorf? 
so excluding for now + - require-error issues: exclude-dirs-use-default: false # recommended by docs https://golangci-lint.run/usage/false-positives/ + max-issues-per-linter: 1000 + max-same-issues: 1000 diff --git a/.vscode/settings.json b/.vscode/settings.json index 853e84de8..f8b04f126 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -7,11 +7,14 @@ "go.lintFlags": [ "--fast" ], + "go.useLanguageServer": true, + "gopls": { + "formatting.gofumpt": true + }, "files.trimTrailingWhitespace": true, "files.insertFinalNewline": true, "files.trimFinalNewlines": true, "python.envFile": "${workspaceRoot}/.env", - "databricks.python.envFile": "${workspaceFolder}/.env", "python.analysis.stubPath": ".vscode", "jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])", "jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------" diff --git a/CHANGELOG.md b/CHANGELOG.md index 56207686a..5b59fa540 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,37 @@ # Version changelog +## [Release] Release v0.238.0 + +Bundles: + * Fix finding Python within virtualenv on Windows ([#2034](https://github.com/databricks/cli/pull/2034)). + * Include missing field descriptions in JSON schema ([#2045](https://github.com/databricks/cli/pull/2045)). + * Add validation for volume referenced from `artifact_path` ([#2050](https://github.com/databricks/cli/pull/2050)). + * Handle `${workspace.file_path}` references in source-linked deployments ([#2046](https://github.com/databricks/cli/pull/2046)). + * Set the write bit for files written during template initialization ([#2068](https://github.com/databricks/cli/pull/2068)). + +## [Release] Release v0.237.0 + +Bundles: + * Allow overriding compute for non-development mode targets ([#1899](https://github.com/databricks/cli/pull/1899)). + * Show an error when using a cluster override with 'mode: production' ([#1994](https://github.com/databricks/cli/pull/1994)). + +API Changes: + * Added `databricks account federation-policy` command group. + * Added `databricks account service-principal-federation-policy` command group. + * Added `databricks aibi-dashboard-embedding-access-policy delete` command. + * Added `databricks aibi-dashboard-embedding-approved-domains delete` command. + +OpenAPI commit a6a317df8327c9b1e5cb59a03a42ffa2aabeef6d (2024-12-16) +Dependency updates: + * Upgrade TF provider to 1.62.0 ([#2030](https://github.com/databricks/cli/pull/2030)). + * Upgrade Go SDK to 0.54.0 ([#2029](https://github.com/databricks/cli/pull/2029)). + * Bump TF codegen dependencies to latest ([#1961](https://github.com/databricks/cli/pull/1961)). + * Bump golang.org/x/term from 0.26.0 to 0.27.0 ([#1983](https://github.com/databricks/cli/pull/1983)). + * Bump golang.org/x/sync from 0.9.0 to 0.10.0 ([#1984](https://github.com/databricks/cli/pull/1984)). + * Bump github.com/databricks/databricks-sdk-go from 0.52.0 to 0.53.0 ([#1985](https://github.com/databricks/cli/pull/1985)). + * Bump golang.org/x/crypto from 0.24.0 to 0.31.0 ([#2006](https://github.com/databricks/cli/pull/2006)). + * Bump golang.org/x/crypto from 0.30.0 to 0.31.0 in /bundle/internal/tf/codegen ([#2005](https://github.com/databricks/cli/pull/2005)). + ## [Release] Release v0.236.0 **New features for Databricks Asset Bundles:** diff --git a/Makefile b/Makefile index 13787fdda..2c84d88ba 100644 --- a/Makefile +++ b/Makefile @@ -1,40 +1,48 @@ default: build +PACKAGES=./acceptance/... ./libs/... 
./internal/... ./cmd/... ./bundle/... . + +GOTESTSUM_FORMAT ?= pkgname-and-test-fails + +lint: + golangci-lint run --fix + +lintcheck: + golangci-lint run ./... + +# Note 'make lint' will do formatting as well. However, if there are compilation errors, +# formatting/goimports will not be applied by 'make lint'. However, it will be applied by 'make fmt'. +# If you need to ensure that formatting & imports are always fixed, do "make fmt lint" fmt: - @echo "✓ Formatting source code with goimports ..." - @goimports -w $(shell find . -type f -name '*.go' -not -path "./vendor/*") - @echo "✓ Formatting source code with gofmt ..." - @gofmt -w $(shell find . -type f -name '*.go' -not -path "./vendor/*") + golangci-lint run --enable-only="gofmt,gofumpt,goimports" --fix ./... -lint: vendor - @echo "✓ Linting source code with https://golangci-lint.run/ ..." - @golangci-lint run ./... +test: + gotestsum --format ${GOTESTSUM_FORMAT} --no-summary=skipped -- ${PACKAGES} -lintfix: vendor - @echo "✓ Linting source code with 'golangci-lint run --fix' ..." - @golangci-lint run --fix ./... +cover: + gotestsum --format ${GOTESTSUM_FORMAT} --no-summary=skipped -- -coverprofile=coverage.txt ${PACKAGES} -test: lint testonly - -testonly: - @echo "✓ Running tests ..." - @gotestsum --format pkgname-and-test-fails --no-summary=skipped --raw-command go test -v -json -short -coverprofile=coverage.txt ./... - -coverage: test - @echo "✓ Opening coverage for unit tests ..." - @go tool cover -html=coverage.txt +showcover: + go tool cover -html=coverage.txt build: vendor - @echo "✓ Building source code with go build ..." - @go build -mod vendor + go build -mod vendor snapshot: - @echo "✓ Building dev snapshot" - @go build -o .databricks/databricks + go build -o .databricks/databricks vendor: - @echo "✓ Filling vendor folder with library code ..." - @go mod vendor + go mod vendor + +schema: + go run ./bundle/internal/schema ./bundle/internal/schema ./bundle/schema/jsonschema.json -.PHONY: build vendor coverage test lint fmt +INTEGRATION = gotestsum --format github-actions --rerun-fails --jsonfile output.json --packages "./integration/..." -- -parallel 4 -timeout=2h +integration: + $(INTEGRATION) + +integration-short: + $(INTEGRATION) -short + +.PHONY: lint lintcheck fmt test cover showcover build snapshot vendor schema integration integration-short diff --git a/NOTICE b/NOTICE index d8306510e..f6b59e0b0 100644 --- a/NOTICE +++ b/NOTICE @@ -73,10 +73,6 @@ fatih/color - https://github.com/fatih/color Copyright (c) 2013 Fatih Arslan License - https://github.com/fatih/color/blob/main/LICENSE.md -ghodss/yaml - https://github.com/ghodss/yaml -Copyright (c) 2014 Sam Ghods -License - https://github.com/ghodss/yaml/blob/master/LICENSE - Masterminds/semver - https://github.com/Masterminds/semver Copyright (C) 2014-2019, Matt Butcher and Matt Farina License - https://github.com/Masterminds/semver/blob/master/LICENSE.txt @@ -101,3 +97,11 @@ License - https://github.com/stretchr/testify/blob/master/LICENSE whilp/git-urls - https://github.com/whilp/git-urls Copyright (c) 2020 Will Maier License - https://github.com/whilp/git-urls/blob/master/LICENSE + +github.com/wI2L/jsondiff v0.6.1 +Copyright (c) 2020-2024 William Poussier +License - https://github.com/wI2L/jsondiff/blob/master/LICENSE + +https://github.com/hexops/gotextdiff +Copyright (c) 2009 The Go Authors. All rights reserved. 
+License - https://github.com/hexops/gotextdiff/blob/main/LICENSE diff --git a/acceptance/README.md b/acceptance/README.md new file mode 100644 index 000000000..42a37d253 --- /dev/null +++ b/acceptance/README.md @@ -0,0 +1,19 @@ +Acceptance tests are blackbox tests that are run against compiled binary. + +Currently these tests are run against "fake" HTTP server pretending to be Databricks API. However, they will be extended to run against real environment as regular integration tests. + +To author a test, + - Add a new directory under `acceptance`. Any level of nesting is supported. + - Add `databricks.yml` there. + - Add `script` with commands to run, e.g. `$CLI bundle validate`. The test case is recognized by presence of `script`. + +The test runner will run script and capture output and compare it with `output.txt` file in the same directory. + +In order to write `output.txt` for the first time or overwrite it with the current output pass -update flag to go test. + +The scripts are run with `bash -e` so any errors will be propagated. They are captured in `output.txt` by appending `Exit code: N` line at the end. + +For more complex tests one can also use: +- `errcode` helper: if the command fails with non-zero code, it appends `Exit code: N` to the output but returns success to caller (bash), allowing continuation of script. +- `trace` helper: prints the arguments before executing the command. +- custom output files: redirect output to custom file (it must start with `out`), e.g. `$CLI bundle validate > out.txt 2> out.error.txt`. diff --git a/acceptance/acceptance_test.go b/acceptance/acceptance_test.go new file mode 100644 index 000000000..b9fb219dc --- /dev/null +++ b/acceptance/acceptance_test.go @@ -0,0 +1,311 @@ +package acceptance_test + +import ( + "errors" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "runtime" + "slices" + "sort" + "strings" + "testing" + "time" + + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/libs/testdiff" + "github.com/stretchr/testify/require" +) + +var KeepTmp = os.Getenv("KEEP_TMP") != "" + +const ( + EntryPointScript = "script" + CleanupScript = "script.cleanup" + PrepareScript = "script.prepare" +) + +var Scripts = map[string]bool{ + EntryPointScript: true, + CleanupScript: true, + PrepareScript: true, +} + +func TestAccept(t *testing.T) { + cwd, err := os.Getwd() + require.NoError(t, err) + + execPath := BuildCLI(t, cwd) + // $CLI is what test scripts are using + t.Setenv("CLI", execPath) + + // Make helper scripts available + t.Setenv("PATH", fmt.Sprintf("%s%c%s", filepath.Join(cwd, "bin"), os.PathListSeparator, os.Getenv("PATH"))) + + server := StartServer(t) + AddHandlers(server) + // Redirect API access to local server: + t.Setenv("DATABRICKS_HOST", fmt.Sprintf("http://127.0.0.1:%d", server.Port)) + t.Setenv("DATABRICKS_TOKEN", "dapi1234") + + homeDir := t.TempDir() + // Do not read user's ~/.databrickscfg + t.Setenv(env.HomeEnvVar(), homeDir) + + repls := testdiff.ReplacementsContext{} + repls.Set(execPath, "$CLI") + + testDirs := getTests(t) + require.NotEmpty(t, testDirs) + for _, dir := range testDirs { + t.Run(dir, func(t *testing.T) { + t.Parallel() + runTest(t, dir, repls) + }) + } +} + +func getTests(t *testing.T) []string { + testDirs := make([]string, 0, 128) + + err := filepath.Walk(".", func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + name := filepath.Base(path) + if name == EntryPointScript { + // Presence of 'script' 
marks a test case in this directory + testDirs = append(testDirs, filepath.Dir(path)) + } + return nil + }) + require.NoError(t, err) + + sort.Strings(testDirs) + return testDirs +} + +func runTest(t *testing.T, dir string, repls testdiff.ReplacementsContext) { + var tmpDir string + var err error + if KeepTmp { + tempDirBase := filepath.Join(os.TempDir(), "acceptance") + _ = os.Mkdir(tempDirBase, 0o755) + tmpDir, err = os.MkdirTemp(tempDirBase, "") + require.NoError(t, err) + t.Logf("Created directory: %s", tmpDir) + } else { + tmpDir = t.TempDir() + } + + scriptContents := readMergedScriptContents(t, dir) + testutil.WriteFile(t, filepath.Join(tmpDir, EntryPointScript), scriptContents) + + inputs := make(map[string]bool, 2) + outputs := make(map[string]bool, 2) + err = CopyDir(dir, tmpDir, inputs, outputs) + require.NoError(t, err) + + args := []string{"bash", "-euo", "pipefail", EntryPointScript} + cmd := exec.Command(args[0], args[1:]...) + cmd.Dir = tmpDir + outB, err := cmd.CombinedOutput() + + out := formatOutput(string(outB), err) + out = repls.Replace(out) + doComparison(t, filepath.Join(dir, "output.txt"), "script output", out) + + for key := range outputs { + if key == "output.txt" { + // handled above + continue + } + pathNew := filepath.Join(tmpDir, key) + newValBytes, err := os.ReadFile(pathNew) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + t.Errorf("%s: expected to find this file but could not (%s)", key, tmpDir) + } else { + t.Errorf("%s: could not read: %s", key, err) + } + continue + } + pathExpected := filepath.Join(dir, key) + newVal := repls.Replace(string(newValBytes)) + doComparison(t, pathExpected, pathNew, newVal) + } + + // Make sure there are not unaccounted for new files + files, err := os.ReadDir(tmpDir) + require.NoError(t, err) + + for _, f := range files { + name := f.Name() + if _, ok := inputs[name]; ok { + continue + } + if _, ok := outputs[name]; ok { + continue + } + t.Errorf("Unexpected output: %s", f) + if strings.HasPrefix(name, "out") { + // We have a new file starting with "out" + // Show the contents & support overwrite mode for it: + pathNew := filepath.Join(tmpDir, name) + newVal := testutil.ReadFile(t, pathNew) + newVal = repls.Replace(newVal) + doComparison(t, filepath.Join(dir, name), filepath.Join(tmpDir, name), newVal) + } + } +} + +func doComparison(t *testing.T, pathExpected, pathNew, valueNew string) { + valueNew = testdiff.NormalizeNewlines(valueNew) + valueExpected := string(readIfExists(t, pathExpected)) + valueExpected = testdiff.NormalizeNewlines(valueExpected) + testdiff.AssertEqualTexts(t, pathExpected, pathNew, valueExpected, valueNew) + if testdiff.OverwriteMode { + if valueNew != "" { + t.Logf("Overwriting: %s", pathExpected) + testutil.WriteFile(t, pathExpected, valueNew) + } else { + t.Logf("Removing: %s", pathExpected) + _ = os.Remove(pathExpected) + } + } +} + +// Returns combined script.prepare (root) + script.prepare (parent) + ... + script + ... + script.cleanup (parent) + ... +// Note, cleanups are not executed if main script fails; that's not a huge issue, since it runs it temp dir. 
+func readMergedScriptContents(t *testing.T, dir string) string { + scriptContents := testutil.ReadFile(t, filepath.Join(dir, EntryPointScript)) + prepares := []string{} + cleanups := []string{} + + for { + x := readIfExists(t, filepath.Join(dir, CleanupScript)) + if len(x) > 0 { + cleanups = append(cleanups, string(x)) + } + + x = readIfExists(t, filepath.Join(dir, PrepareScript)) + if len(x) > 0 { + prepares = append(prepares, string(x)) + } + + if dir == "" || dir == "." { + break + } + + dir = filepath.Dir(dir) + require.True(t, filepath.IsLocal(dir)) + } + + slices.Reverse(prepares) + prepares = append(prepares, scriptContents) + prepares = append(prepares, cleanups...) + return strings.Join(prepares, "\n") +} + +func BuildCLI(t *testing.T, cwd string) string { + execPath := filepath.Join(cwd, "build", "databricks") + if runtime.GOOS == "windows" { + execPath += ".exe" + } + + start := time.Now() + args := []string{"go", "build", "-mod", "vendor", "-o", execPath} + cmd := exec.Command(args[0], args[1:]...) + cmd.Dir = ".." + out, err := cmd.CombinedOutput() + elapsed := time.Since(start) + t.Logf("%s took %s", args, elapsed) + require.NoError(t, err, "go build failed: %s: %s\n%s", args, err, out) + if len(out) > 0 { + t.Logf("go build output: %s: %s", args, out) + } + + // Quick check + warm up cache: + cmd = exec.Command(execPath, "--version") + out, err = cmd.CombinedOutput() + require.NoError(t, err, "%s --version failed: %s\n%s", execPath, err, out) + return execPath +} + +func copyFile(src, dst string) error { + in, err := os.Open(src) + if err != nil { + return err + } + defer in.Close() + + out, err := os.Create(dst) + if err != nil { + return err + } + defer out.Close() + + _, err = io.Copy(out, in) + return err +} + +func formatOutput(out string, err error) string { + if err == nil { + return out + } + if exiterr, ok := err.(*exec.ExitError); ok { + exitCode := exiterr.ExitCode() + out += fmt.Sprintf("\nExit code: %d\n", exitCode) + } else { + out += fmt.Sprintf("\nError: %s\n", err) + } + return out +} + +func readIfExists(t *testing.T, path string) []byte { + data, err := os.ReadFile(path) + if err == nil { + return data + } + + if !errors.Is(err, os.ErrNotExist) { + t.Fatalf("%s: %s", path, err) + } + return []byte{} +} + +func CopyDir(src, dst string, inputs, outputs map[string]bool) error { + return filepath.Walk(src, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + name := info.Name() + + relPath, err := filepath.Rel(src, path) + if err != nil { + return err + } + + if strings.HasPrefix(name, "out") { + outputs[relPath] = true + return nil + } else { + inputs[relPath] = true + } + + if _, ok := Scripts[name]; ok { + return nil + } + + destPath := filepath.Join(dst, relPath) + + if info.IsDir() { + return os.MkdirAll(destPath, info.Mode()) + } + + return copyFile(path, destPath) + }) +} diff --git a/acceptance/bin/sort_blocks.py b/acceptance/bin/sort_blocks.py new file mode 100755 index 000000000..f50c6f50f --- /dev/null +++ b/acceptance/bin/sort_blocks.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python3 +""" +Helper to sort blocks in text file. A block is a set of lines separated from others by empty line. + +This is to workaround non-determinism in the output. 
+""" +import sys + +blocks = [] + +for line in sys.stdin: + if not line.strip(): + if blocks and blocks[-1]: + blocks.append('') + continue + if not blocks: + blocks.append('') + blocks[-1] += line + +blocks.sort() +print("\n".join(blocks)) diff --git a/acceptance/build/.gitignore b/acceptance/build/.gitignore new file mode 100644 index 000000000..a48b4db25 --- /dev/null +++ b/acceptance/build/.gitignore @@ -0,0 +1 @@ +databricks diff --git a/bundle/tests/clusters/databricks.yml b/acceptance/bundle/override/clusters/databricks.yml similarity index 92% rename from bundle/tests/clusters/databricks.yml rename to acceptance/bundle/override/clusters/databricks.yml index 1074462a6..14efceec0 100644 --- a/bundle/tests/clusters/databricks.yml +++ b/acceptance/bundle/override/clusters/databricks.yml @@ -1,9 +1,6 @@ bundle: name: clusters -workspace: - host: https://acme.cloud.databricks.com/ - resources: clusters: foo: diff --git a/acceptance/bundle/override/clusters/output.txt b/acceptance/bundle/override/clusters/output.txt new file mode 100644 index 000000000..cff30b3af --- /dev/null +++ b/acceptance/bundle/override/clusters/output.txt @@ -0,0 +1,33 @@ + +>>> $CLI bundle validate -o json -t default +{ + "autoscale": { + "max_workers": 7, + "min_workers": 2 + }, + "cluster_name": "foo", + "custom_tags": {}, + "node_type_id": "i3.xlarge", + "num_workers": 2, + "spark_conf": { + "spark.executor.memory": "2g" + }, + "spark_version": "13.3.x-scala2.12" +} + +>>> $CLI bundle validate -o json -t development +{ + "autoscale": { + "max_workers": 3, + "min_workers": 1 + }, + "cluster_name": "foo-override", + "custom_tags": {}, + "node_type_id": "m5.xlarge", + "num_workers": 3, + "spark_conf": { + "spark.executor.memory": "4g", + "spark.executor.memory2": "4g" + }, + "spark_version": "15.2.x-scala2.12" +} diff --git a/acceptance/bundle/override/clusters/script b/acceptance/bundle/override/clusters/script new file mode 100644 index 000000000..4a73dd93e --- /dev/null +++ b/acceptance/bundle/override/clusters/script @@ -0,0 +1,2 @@ +trace $CLI bundle validate -o json -t default | jq .resources.clusters.foo +trace $CLI bundle validate -o json -t development | jq .resources.clusters.foo diff --git a/bundle/tests/override_job_cluster/databricks.yml b/acceptance/bundle/override/job_cluster/databricks.yml similarity index 91% rename from bundle/tests/override_job_cluster/databricks.yml rename to acceptance/bundle/override/job_cluster/databricks.yml index a85b3b711..d6b7ede4f 100644 --- a/bundle/tests/override_job_cluster/databricks.yml +++ b/acceptance/bundle/override/job_cluster/databricks.yml @@ -1,9 +1,6 @@ bundle: name: override_job_cluster -workspace: - host: https://acme.cloud.databricks.com/ - resources: jobs: foo: diff --git a/acceptance/bundle/override/job_cluster/output.txt b/acceptance/bundle/override/job_cluster/output.txt new file mode 100644 index 000000000..947d19032 --- /dev/null +++ b/acceptance/bundle/override/job_cluster/output.txt @@ -0,0 +1,56 @@ + +>>> $CLI bundle validate -o json -t development +{ + "foo": { + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/override_job_cluster/development/state/metadata.json" + }, + "edit_mode": "UI_LOCKED", + "format": "MULTI_TASK", + "job_clusters": [ + { + "job_cluster_key": "key", + "new_cluster": { + "node_type_id": "i3.xlarge", + "num_workers": 1, + "spark_version": "13.3.x-scala2.12" + } + } + ], + "name": "job", + "permissions": [], + "queue": { + "enabled": true + }, + "tags": {} + } +} + 
+>>> $CLI bundle validate -o json -t staging +{ + "foo": { + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/override_job_cluster/staging/state/metadata.json" + }, + "edit_mode": "UI_LOCKED", + "format": "MULTI_TASK", + "job_clusters": [ + { + "job_cluster_key": "key", + "new_cluster": { + "node_type_id": "i3.2xlarge", + "num_workers": 4, + "spark_version": "13.3.x-scala2.12" + } + } + ], + "name": "job", + "permissions": [], + "queue": { + "enabled": true + }, + "tags": {} + } +} diff --git a/acceptance/bundle/override/job_cluster/script b/acceptance/bundle/override/job_cluster/script new file mode 100644 index 000000000..4a26c433a --- /dev/null +++ b/acceptance/bundle/override/job_cluster/script @@ -0,0 +1,2 @@ +trace $CLI bundle validate -o json -t development | jq '.resources.jobs' +trace $CLI bundle validate -o json -t staging | jq '.resources.jobs' diff --git a/acceptance/bundle/override/job_cluster_var/databricks.yml b/acceptance/bundle/override/job_cluster_var/databricks.yml new file mode 100644 index 000000000..546cc2d8a --- /dev/null +++ b/acceptance/bundle/override/job_cluster_var/databricks.yml @@ -0,0 +1,37 @@ +bundle: + name: override_job_cluster + +variables: + mykey: + default: key + +resources: + jobs: + foo: + name: job + job_clusters: + - job_cluster_key: key + new_cluster: + spark_version: 13.3.x-scala2.12 + +targets: + development: + resources: + jobs: + foo: + job_clusters: + # This does not work because merging is done before resolution + - job_cluster_key: "${var.mykey}" + new_cluster: + node_type_id: i3.xlarge + num_workers: 1 + + staging: + resources: + jobs: + foo: + job_clusters: + - job_cluster_key: "${var.mykey}" + new_cluster: + node_type_id: i3.2xlarge + num_workers: 4 diff --git a/acceptance/bundle/override/job_cluster_var/output.txt b/acceptance/bundle/override/job_cluster_var/output.txt new file mode 100644 index 000000000..dee2a3b5b --- /dev/null +++ b/acceptance/bundle/override/job_cluster_var/output.txt @@ -0,0 +1,84 @@ + +>>> $CLI bundle validate -o json -t development +{ + "foo": { + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/override_job_cluster/development/state/metadata.json" + }, + "edit_mode": "UI_LOCKED", + "format": "MULTI_TASK", + "job_clusters": [ + { + "job_cluster_key": "key", + "new_cluster": { + "spark_version": "13.3.x-scala2.12" + } + }, + { + "job_cluster_key": "key", + "new_cluster": { + "node_type_id": "i3.xlarge", + "num_workers": 1 + } + } + ], + "name": "job", + "permissions": [], + "queue": { + "enabled": true + }, + "tags": {} + } +} + +>>> $CLI bundle validate -t development +Name: override_job_cluster +Target: development +Workspace: + User: tester@databricks.com + Path: /Workspace/Users/tester@databricks.com/.bundle/override_job_cluster/development + +Validation OK! 
+ +>>> $CLI bundle validate -o json -t staging +{ + "foo": { + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/override_job_cluster/staging/state/metadata.json" + }, + "edit_mode": "UI_LOCKED", + "format": "MULTI_TASK", + "job_clusters": [ + { + "job_cluster_key": "key", + "new_cluster": { + "spark_version": "13.3.x-scala2.12" + } + }, + { + "job_cluster_key": "key", + "new_cluster": { + "node_type_id": "i3.2xlarge", + "num_workers": 4 + } + } + ], + "name": "job", + "permissions": [], + "queue": { + "enabled": true + }, + "tags": {} + } +} + +>>> $CLI bundle validate -t staging +Name: override_job_cluster +Target: staging +Workspace: + User: tester@databricks.com + Path: /Workspace/Users/tester@databricks.com/.bundle/override_job_cluster/staging + +Validation OK! diff --git a/acceptance/bundle/override/job_cluster_var/script b/acceptance/bundle/override/job_cluster_var/script new file mode 100644 index 000000000..1cf373828 --- /dev/null +++ b/acceptance/bundle/override/job_cluster_var/script @@ -0,0 +1,4 @@ +trace $CLI bundle validate -o json -t development | jq '.resources.jobs' +trace $CLI bundle validate -t development +trace $CLI bundle validate -o json -t staging | jq '.resources.jobs' +trace $CLI bundle validate -t staging diff --git a/bundle/tests/override_job_tasks/databricks.yml b/acceptance/bundle/override/job_tasks/databricks.yml similarity index 94% rename from bundle/tests/override_job_tasks/databricks.yml rename to acceptance/bundle/override/job_tasks/databricks.yml index ddee28793..fd7edafb9 100644 --- a/bundle/tests/override_job_tasks/databricks.yml +++ b/acceptance/bundle/override/job_tasks/databricks.yml @@ -1,9 +1,6 @@ bundle: name: override_job_tasks -workspace: - host: https://acme.cloud.databricks.com/ - resources: jobs: foo: diff --git a/acceptance/bundle/override/job_tasks/out.development.stderr.txt b/acceptance/bundle/override/job_tasks/out.development.stderr.txt new file mode 100644 index 000000000..7b6fef0cc --- /dev/null +++ b/acceptance/bundle/override/job_tasks/out.development.stderr.txt @@ -0,0 +1,6 @@ + +>>> errcode $CLI bundle validate -o json -t development +Error: file ./test1.py not found + + +Exit code: 1 diff --git a/acceptance/bundle/override/job_tasks/output.txt b/acceptance/bundle/override/job_tasks/output.txt new file mode 100644 index 000000000..0bb0b1812 --- /dev/null +++ b/acceptance/bundle/override/job_tasks/output.txt @@ -0,0 +1,77 @@ +{ + "name": "job", + "queue": { + "enabled": true + }, + "tags": {}, + "tasks": [ + { + "new_cluster": { + "node_type_id": "i3.xlarge", + "num_workers": 1, + "spark_version": "13.3.x-scala2.12" + }, + "spark_python_task": { + "python_file": "./test1.py" + }, + "task_key": "key1" + }, + { + "new_cluster": { + "spark_version": "13.3.x-scala2.12" + }, + "spark_python_task": { + "python_file": "./test2.py" + }, + "task_key": "key2" + } + ] +} + +>>> errcode $CLI bundle validate -o json -t staging +Error: file ./test1.py not found + + +Exit code: 1 +{ + "name": "job", + "queue": { + "enabled": true + }, + "tags": {}, + "tasks": [ + { + "new_cluster": { + "spark_version": "13.3.x-scala2.12" + }, + "spark_python_task": { + "python_file": "./test1.py" + }, + "task_key": "key1" + }, + { + "new_cluster": { + "node_type_id": "i3.2xlarge", + "num_workers": 4, + "spark_version": "13.3.x-scala2.12" + }, + "spark_python_task": { + "python_file": "./test3.py" + }, + "task_key": "key2" + } + ] +} + +>>> errcode $CLI bundle validate -t staging +Error: file ./test1.py 
not found + +Name: override_job_tasks +Target: staging +Workspace: + User: tester@databricks.com + Path: /Workspace/Users/tester@databricks.com/.bundle/override_job_tasks/staging + +Found 1 error + +Exit code: 1 diff --git a/acceptance/bundle/override/job_tasks/script b/acceptance/bundle/override/job_tasks/script new file mode 100644 index 000000000..f41729c1e --- /dev/null +++ b/acceptance/bundle/override/job_tasks/script @@ -0,0 +1,3 @@ +trace errcode $CLI bundle validate -o json -t development 2> out.development.stderr.txt | jq .resources.jobs.foo +trace errcode $CLI bundle validate -o json -t staging | jq .resources.jobs.foo +trace errcode $CLI bundle validate -t staging diff --git a/acceptance/bundle/override/merge-string-map/databricks.yml b/acceptance/bundle/override/merge-string-map/databricks.yml new file mode 100644 index 000000000..5e443ceca --- /dev/null +++ b/acceptance/bundle/override/merge-string-map/databricks.yml @@ -0,0 +1,13 @@ +bundle: + name: merge-string-map + +resources: + clusters: + my_cluster: "hello" + +targets: + dev: + resources: + clusters: + my_cluster: + spark_version: "25" diff --git a/acceptance/bundle/override/merge-string-map/output.txt b/acceptance/bundle/override/merge-string-map/output.txt new file mode 100644 index 000000000..986da8174 --- /dev/null +++ b/acceptance/bundle/override/merge-string-map/output.txt @@ -0,0 +1,27 @@ + +>>> $CLI bundle validate -o json -t dev +Warning: expected map, found string + at resources.clusters.my_cluster + in databricks.yml:6:17 + +{ + "clusters": { + "my_cluster": { + "custom_tags": {}, + "spark_version": "25" + } + } +} + +>>> $CLI bundle validate -t dev +Warning: expected map, found string + at resources.clusters.my_cluster + in databricks.yml:6:17 + +Name: merge-string-map +Target: dev +Workspace: + User: tester@databricks.com + Path: /Workspace/Users/tester@databricks.com/.bundle/merge-string-map/dev + +Found 1 warning diff --git a/acceptance/bundle/override/merge-string-map/script b/acceptance/bundle/override/merge-string-map/script new file mode 100644 index 000000000..a109d5f69 --- /dev/null +++ b/acceptance/bundle/override/merge-string-map/script @@ -0,0 +1,2 @@ +trace $CLI bundle validate -o json -t dev | jq .resources +trace $CLI bundle validate -t dev diff --git a/bundle/tests/override_pipeline_cluster/databricks.yml b/acceptance/bundle/override/pipeline_cluster/databricks.yml similarity index 90% rename from bundle/tests/override_pipeline_cluster/databricks.yml rename to acceptance/bundle/override/pipeline_cluster/databricks.yml index 8930f30e8..8b4857460 100644 --- a/bundle/tests/override_pipeline_cluster/databricks.yml +++ b/acceptance/bundle/override/pipeline_cluster/databricks.yml @@ -1,9 +1,6 @@ bundle: name: override_pipeline_cluster -workspace: - host: https://acme.cloud.databricks.com/ - resources: pipelines: foo: diff --git a/acceptance/bundle/override/pipeline_cluster/output.txt b/acceptance/bundle/override/pipeline_cluster/output.txt new file mode 100644 index 000000000..81bf58180 --- /dev/null +++ b/acceptance/bundle/override/pipeline_cluster/output.txt @@ -0,0 +1,44 @@ + +>>> $CLI bundle validate -o json -t development +{ + "foo": { + "clusters": [ + { + "label": "default", + "node_type_id": "i3.xlarge", + "num_workers": 1, + "spark_conf": { + "foo": "bar" + } + } + ], + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/override_pipeline_cluster/development/state/metadata.json" + }, + "name": "job", + "permissions": [] + } +} + +>>> 
$CLI bundle validate -o json -t staging +{ + "foo": { + "clusters": [ + { + "label": "default", + "node_type_id": "i3.2xlarge", + "num_workers": 4, + "spark_conf": { + "foo": "bar" + } + } + ], + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/override_pipeline_cluster/staging/state/metadata.json" + }, + "name": "job", + "permissions": [] + } +} diff --git a/acceptance/bundle/override/pipeline_cluster/script b/acceptance/bundle/override/pipeline_cluster/script new file mode 100644 index 000000000..b06005ce5 --- /dev/null +++ b/acceptance/bundle/override/pipeline_cluster/script @@ -0,0 +1,2 @@ +trace $CLI bundle validate -o json -t development | jq .resources.pipelines +trace $CLI bundle validate -o json -t staging | jq .resources.pipelines diff --git a/bundle/tests/undefined_resources/databricks.yml b/acceptance/bundle/undefined_resources/databricks.yml similarity index 100% rename from bundle/tests/undefined_resources/databricks.yml rename to acceptance/bundle/undefined_resources/databricks.yml diff --git a/acceptance/bundle/undefined_resources/output.txt b/acceptance/bundle/undefined_resources/output.txt new file mode 100644 index 000000000..29b51bc1a --- /dev/null +++ b/acceptance/bundle/undefined_resources/output.txt @@ -0,0 +1,19 @@ +Error: experiment undefined-experiment is not defined + at resources.experiments.undefined-experiment + in databricks.yml:11:26 + +Error: job undefined-job is not defined + at resources.jobs.undefined-job + in databricks.yml:6:19 + +Error: pipeline undefined-pipeline is not defined + at resources.pipelines.undefined-pipeline + in databricks.yml:14:24 + +Found 3 errors + +Name: undefined-job +Target: default + + +Exit code: 1 diff --git a/acceptance/bundle/undefined_resources/script b/acceptance/bundle/undefined_resources/script new file mode 100644 index 000000000..10a3c485a --- /dev/null +++ b/acceptance/bundle/undefined_resources/script @@ -0,0 +1,2 @@ +# We need sort_blocks.py because the order of diagnostics is currently randomized +$CLI bundle validate 2>&1 | sort_blocks.py diff --git a/acceptance/bundle/variables/complex-transitive/databricks.yml b/acceptance/bundle/variables/complex-transitive/databricks.yml new file mode 100644 index 000000000..9ef4e6386 --- /dev/null +++ b/acceptance/bundle/variables/complex-transitive/databricks.yml @@ -0,0 +1,19 @@ +bundle: + name: complex-transitive + +variables: + catalog: + default: hive_metastore + spark_conf: + default: + "spark.databricks.sql.initial.catalog.name": ${var.catalog} + etl_cluster_config: + type: complex + default: + spark_version: 14.3.x-scala2.12 + runtime_engine: PHOTON + spark_conf: ${var.spark_conf} + +resources: + clusters: + my_cluster: ${var.etl_cluster_config} diff --git a/acceptance/bundle/variables/complex-transitive/output.txt b/acceptance/bundle/variables/complex-transitive/output.txt new file mode 100644 index 000000000..a031e0497 --- /dev/null +++ b/acceptance/bundle/variables/complex-transitive/output.txt @@ -0,0 +1,3 @@ +{ + "spark.databricks.sql.initial.catalog.name": "${var.catalog}" +} diff --git a/acceptance/bundle/variables/complex-transitive/script b/acceptance/bundle/variables/complex-transitive/script new file mode 100644 index 000000000..52bb08ed4 --- /dev/null +++ b/acceptance/bundle/variables/complex-transitive/script @@ -0,0 +1,2 @@ +# Currently, this incorrectly outputs variable reference instead of resolved value +$CLI bundle validate -o json | jq '.resources.clusters.my_cluster.spark_conf' diff --git 
a/bundle/tests/variables/complex/databricks.yml b/acceptance/bundle/variables/complex/databricks.yml similarity index 82% rename from bundle/tests/variables/complex/databricks.yml rename to acceptance/bundle/variables/complex/databricks.yml index 3b32a7c8e..5dcc30b08 100644 --- a/bundle/tests/variables/complex/databricks.yml +++ b/acceptance/bundle/variables/complex/databricks.yml @@ -11,6 +11,7 @@ resources: - task_key: test job_cluster_key: key libraries: ${variables.libraries.value} + # specific fields of complex variable are referenced: task_key: "task with spark version ${var.cluster.spark_version} and jar ${var.libraries[0].jar}" variables: @@ -35,30 +36,21 @@ variables: - jar: "/path/to/jar" - egg: "/path/to/egg" - whl: "/path/to/whl" - complexvar: - type: complex - description: "A complex variable" - default: - key1: "value1" - key2: "value2" - key3: "value3" - targets: default: + default: true dev: variables: node_type: "Standard_DS3_v3" cluster: + # complex variables are not merged, so missing variables (policy_id) are not inherited spark_version: "14.2.x-scala2.11" node_type_id: ${var.node_type} num_workers: 4 spark_conf: spark.speculation: false spark.databricks.delta.retentionDurationCheck.enabled: false - complexvar: - type: complex - default: - key1: "1" - key2: "2" - key3: "3" + libraries: + - jar: "/newpath/to/jar" + - whl: "/newpath/to/whl" diff --git a/acceptance/bundle/variables/complex/out.default.json b/acceptance/bundle/variables/complex/out.default.json new file mode 100644 index 000000000..6454562a6 --- /dev/null +++ b/acceptance/bundle/variables/complex/out.default.json @@ -0,0 +1,110 @@ +{ + "resources": { + "jobs": { + "my_job": { + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/complex-variables/default/state/metadata.json" + }, + "edit_mode": "UI_LOCKED", + "format": "MULTI_TASK", + "job_clusters": [ + { + "job_cluster_key": "key", + "new_cluster": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 2, + "policy_id": "some-policy-id", + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": "false", + "spark.random": "true", + "spark.speculation": "true" + }, + "spark_version": "13.2.x-scala2.11" + } + } + ], + "permissions": [], + "queue": { + "enabled": true + }, + "tags": {}, + "tasks": [ + { + "job_cluster_key": "key", + "libraries": [ + { + "jar": "/path/to/jar" + }, + { + "egg": "/path/to/egg" + }, + { + "whl": "/path/to/whl" + } + ], + "task_key": "task with spark version 13.2.x-scala2.11 and jar /path/to/jar" + } + ] + } + } + }, + "variables": { + "cluster": { + "default": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 2, + "policy_id": "some-policy-id", + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.random": true, + "spark.speculation": true + }, + "spark_version": "13.2.x-scala2.11" + }, + "description": "A cluster definition", + "type": "complex", + "value": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 2, + "policy_id": "some-policy-id", + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.random": true, + "spark.speculation": true + }, + "spark_version": "13.2.x-scala2.11" + } + }, + "libraries": { + "default": [ + { + "jar": "/path/to/jar" + }, + { + "egg": "/path/to/egg" + }, + { + "whl": "/path/to/whl" + } + ], + "description": "A libraries definition", + "type": "complex", + "value": [ + { + "jar": "/path/to/jar" + }, + { + "egg": "/path/to/egg" + }, + { + 
"whl": "/path/to/whl" + } + ] + }, + "node_type": { + "default": "Standard_DS3_v2", + "value": "Standard_DS3_v2" + } + } +} diff --git a/acceptance/bundle/variables/complex/out.dev.json b/acceptance/bundle/variables/complex/out.dev.json new file mode 100644 index 000000000..cede5feb2 --- /dev/null +++ b/acceptance/bundle/variables/complex/out.dev.json @@ -0,0 +1,95 @@ +{ + "resources": { + "jobs": { + "my_job": { + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/complex-variables/dev/state/metadata.json" + }, + "edit_mode": "UI_LOCKED", + "format": "MULTI_TASK", + "job_clusters": [ + { + "job_cluster_key": "key", + "new_cluster": { + "node_type_id": "Standard_DS3_v3", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": "false", + "spark.speculation": "false" + }, + "spark_version": "14.2.x-scala2.11" + } + } + ], + "permissions": [], + "queue": { + "enabled": true + }, + "tags": {}, + "tasks": [ + { + "job_cluster_key": "key", + "libraries": [ + { + "jar": "/newpath/to/jar" + }, + { + "whl": "/newpath/to/whl" + } + ], + "task_key": "task with spark version 14.2.x-scala2.11 and jar /newpath/to/jar" + } + ] + } + } + }, + "variables": { + "cluster": { + "default": { + "node_type_id": "Standard_DS3_v3", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.speculation": false + }, + "spark_version": "14.2.x-scala2.11" + }, + "description": "A cluster definition", + "type": "complex", + "value": { + "node_type_id": "Standard_DS3_v3", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.speculation": false + }, + "spark_version": "14.2.x-scala2.11" + } + }, + "libraries": { + "default": [ + { + "jar": "/newpath/to/jar" + }, + { + "whl": "/newpath/to/whl" + } + ], + "description": "A libraries definition", + "type": "complex", + "value": [ + { + "jar": "/newpath/to/jar" + }, + { + "whl": "/newpath/to/whl" + } + ] + }, + "node_type": { + "default": "Standard_DS3_v3", + "value": "Standard_DS3_v3" + } + } +} diff --git a/acceptance/bundle/variables/complex/output.txt b/acceptance/bundle/variables/complex/output.txt new file mode 100644 index 000000000..ce295421f --- /dev/null +++ b/acceptance/bundle/variables/complex/output.txt @@ -0,0 +1,14 @@ + +>>> $CLI bundle validate -o json + +>>> jq .resources.jobs.my_job.tasks[0].task_key out.default.json +"task with spark version 13.2.x-scala2.11 and jar /path/to/jar" + +>>> $CLI bundle validate -o json -t dev + +>>> jq .resources.jobs.my_job.tasks[0].task_key out.dev.json +"task with spark version 14.2.x-scala2.11 and jar /newpath/to/jar" +policy_id and spark_conf.spark_random fields do not exist in dev target: + +>>> jq .resources.jobs.my_job.job_clusters[0].new_cluster.policy_id out.dev.json +null diff --git a/acceptance/bundle/variables/complex/script b/acceptance/bundle/variables/complex/script new file mode 100644 index 000000000..f8b61f18d --- /dev/null +++ b/acceptance/bundle/variables/complex/script @@ -0,0 +1,8 @@ +trace $CLI bundle validate -o json | jq '{resources,variables}' > out.default.json +trace jq .resources.jobs.my_job.tasks[0].task_key out.default.json | grep "task with spark version 13.2.x-scala2.11 and jar /path/to/jar" + +trace $CLI bundle validate -o json -t dev | jq '{resources,variables}' > out.dev.json +trace jq .resources.jobs.my_job.tasks[0].task_key out.dev.json | grep "task with spark version 
14.2.x-scala2.11 and jar /newpath/to/jar" + +echo policy_id and spark_conf.spark_random fields do not exist in dev target: +trace jq .resources.jobs.my_job.job_clusters[0].new_cluster.policy_id out.dev.json | grep null diff --git a/bundle/tests/variables/complex_multiple_files/databricks.yml b/acceptance/bundle/variables/complex_multiple_files/databricks.yml similarity index 100% rename from bundle/tests/variables/complex_multiple_files/databricks.yml rename to acceptance/bundle/variables/complex_multiple_files/databricks.yml diff --git a/acceptance/bundle/variables/complex_multiple_files/output.txt b/acceptance/bundle/variables/complex_multiple_files/output.txt new file mode 100644 index 000000000..e87b8df11 --- /dev/null +++ b/acceptance/bundle/variables/complex_multiple_files/output.txt @@ -0,0 +1,159 @@ +{ + "resources": { + "jobs": { + "my_job": { + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/complex-variables-multiple-files/dev/state/metadata.json" + }, + "edit_mode": "UI_LOCKED", + "format": "MULTI_TASK", + "job_clusters": [ + { + "job_cluster_key": "key1", + "new_cluster": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": "false", + "spark.speculation": "false" + }, + "spark_version": "14.2.x-scala2.11" + } + }, + { + "job_cluster_key": "key2", + "new_cluster": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": "false", + "spark.speculation": "false" + }, + "spark_version": "14.2.x-scala2.11" + } + }, + { + "job_cluster_key": "key3", + "new_cluster": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": "false", + "spark.speculation": "false" + }, + "spark_version": "14.2.x-scala2.11" + } + }, + { + "job_cluster_key": "key4", + "new_cluster": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": "false", + "spark.speculation": "false" + }, + "spark_version": "14.2.x-scala2.11" + } + } + ], + "permissions": [], + "queue": { + "enabled": true + }, + "tags": {} + } + } + }, + "variables": { + "cluster1": { + "default": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.speculation": false + }, + "spark_version": "14.2.x-scala2.11" + }, + "description": "A cluster definition", + "type": "complex", + "value": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.speculation": false + }, + "spark_version": "14.2.x-scala2.11" + } + }, + "cluster2": { + "default": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.speculation": false + }, + "spark_version": "14.2.x-scala2.11" + }, + "description": "A cluster definition", + "type": "complex", + "value": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.speculation": false + }, + "spark_version": "14.2.x-scala2.11" + } + }, + "cluster3": { + "default": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + 
"spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.speculation": false + }, + "spark_version": "14.2.x-scala2.11" + }, + "description": "A cluster definition", + "type": "complex", + "value": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.speculation": false + }, + "spark_version": "14.2.x-scala2.11" + } + }, + "cluster4": { + "default": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.speculation": false + }, + "spark_version": "14.2.x-scala2.11" + }, + "description": "A cluster definition", + "type": "complex", + "value": { + "node_type_id": "Standard_DS3_v2", + "num_workers": 4, + "spark_conf": { + "spark.databricks.delta.retentionDurationCheck.enabled": false, + "spark.speculation": false + }, + "spark_version": "14.2.x-scala2.11" + } + } + } +} diff --git a/acceptance/bundle/variables/complex_multiple_files/script b/acceptance/bundle/variables/complex_multiple_files/script new file mode 100644 index 000000000..24f1d58d5 --- /dev/null +++ b/acceptance/bundle/variables/complex_multiple_files/script @@ -0,0 +1 @@ +$CLI bundle validate -t dev -o json | jq '{resources, variables}' diff --git a/bundle/tests/variables/complex_multiple_files/variables/clusters.yml b/acceptance/bundle/variables/complex_multiple_files/variables/clusters.yml similarity index 100% rename from bundle/tests/variables/complex_multiple_files/variables/clusters.yml rename to acceptance/bundle/variables/complex_multiple_files/variables/clusters.yml diff --git a/bundle/tests/variables/empty/databricks.yml b/acceptance/bundle/variables/empty/databricks.yml similarity index 100% rename from bundle/tests/variables/empty/databricks.yml rename to acceptance/bundle/variables/empty/databricks.yml diff --git a/acceptance/bundle/variables/empty/output.txt b/acceptance/bundle/variables/empty/output.txt new file mode 100644 index 000000000..c3f0af130 --- /dev/null +++ b/acceptance/bundle/variables/empty/output.txt @@ -0,0 +1,11 @@ +Error: no value assigned to required variable a. 
Assignment can be done through the "--var" flag or by setting the BUNDLE_VAR_a environment variable + +Name: empty${var.a} +Target: default +Workspace: + User: tester@databricks.com + Path: /Workspace/Users/tester@databricks.com/.bundle/empty${var.a}/default + +Found 1 error + +Exit code: 1 diff --git a/acceptance/bundle/variables/empty/script b/acceptance/bundle/variables/empty/script new file mode 100644 index 000000000..72555b332 --- /dev/null +++ b/acceptance/bundle/variables/empty/script @@ -0,0 +1 @@ +$CLI bundle validate diff --git a/bundle/tests/variables/env_overrides/databricks.yml b/acceptance/bundle/variables/env_overrides/databricks.yml similarity index 100% rename from bundle/tests/variables/env_overrides/databricks.yml rename to acceptance/bundle/variables/env_overrides/databricks.yml diff --git a/acceptance/bundle/variables/env_overrides/output.txt b/acceptance/bundle/variables/env_overrides/output.txt new file mode 100644 index 000000000..e8fb99938 --- /dev/null +++ b/acceptance/bundle/variables/env_overrides/output.txt @@ -0,0 +1,40 @@ + +>>> $CLI bundle validate -t env-with-single-variable-override -o json +"default-a dev-b" + +>>> $CLI bundle validate -t env-with-two-variable-overrides -o json +"prod-a prod-b" + +>>> BUNDLE_VAR_b=env-var-b $CLI bundle validate -t env-with-two-variable-overrides -o json +"prod-a env-var-b" + +>>> errcode $CLI bundle validate -t env-missing-a-required-variable-assignment +Error: no value assigned to required variable b. Assignment can be done through the "--var" flag or by setting the BUNDLE_VAR_b environment variable + +Name: test bundle +Target: env-missing-a-required-variable-assignment +Workspace: + User: tester@databricks.com + Path: /Workspace/Users/tester@databricks.com/.bundle/test bundle/env-missing-a-required-variable-assignment + +Found 1 error + +Exit code: 1 + +>>> errcode $CLI bundle validate -t env-using-an-undefined-variable +Error: variable c is not defined but is assigned a value + +Name: test bundle + +Found 1 error + +Exit code: 1 + +>>> $CLI bundle validate -t env-overrides-lookup -o json +{ + "a": "default-a", + "b": "prod-b", + "d": "4321", + "e": "1234", + "f": "9876" +} diff --git a/acceptance/bundle/variables/env_overrides/script b/acceptance/bundle/variables/env_overrides/script new file mode 100644 index 000000000..30919fd8a --- /dev/null +++ b/acceptance/bundle/variables/env_overrides/script @@ -0,0 +1,6 @@ +trace $CLI bundle validate -t env-with-single-variable-override -o json | jq .workspace.profile +trace $CLI bundle validate -t env-with-two-variable-overrides -o json | jq .workspace.profile +trace BUNDLE_VAR_b=env-var-b $CLI bundle validate -t env-with-two-variable-overrides -o json | jq .workspace.profile +trace errcode $CLI bundle validate -t env-missing-a-required-variable-assignment +trace errcode $CLI bundle validate -t env-using-an-undefined-variable +trace $CLI bundle validate -t env-overrides-lookup -o json | jq '.variables | map_values(.value)' diff --git a/acceptance/bundle/variables/resolve-builtin/databricks.yml b/acceptance/bundle/variables/resolve-builtin/databricks.yml new file mode 100644 index 000000000..4bb71c8db --- /dev/null +++ b/acceptance/bundle/variables/resolve-builtin/databricks.yml @@ -0,0 +1,6 @@ +bundle: + name: TestResolveVariableReferences + +workspace: + root_path: "${bundle.name}/bar" + file_path: "${workspace.root_path}/baz" diff --git a/acceptance/bundle/variables/resolve-builtin/output.txt b/acceptance/bundle/variables/resolve-builtin/output.txt new file mode 100644 
index 000000000..2f58abd8a --- /dev/null +++ b/acceptance/bundle/variables/resolve-builtin/output.txt @@ -0,0 +1,11 @@ +{ + "artifact_path": "TestResolveVariableReferences/bar/artifacts", + "current_user": { + "short_name": "tester", + "userName": "tester@databricks.com" + }, + "file_path": "TestResolveVariableReferences/bar/baz", + "resource_path": "TestResolveVariableReferences/bar/resources", + "root_path": "TestResolveVariableReferences/bar", + "state_path": "TestResolveVariableReferences/bar/state" +} diff --git a/acceptance/bundle/variables/resolve-builtin/script b/acceptance/bundle/variables/resolve-builtin/script new file mode 100644 index 000000000..fefd9abe6 --- /dev/null +++ b/acceptance/bundle/variables/resolve-builtin/script @@ -0,0 +1 @@ +$CLI bundle validate -o json | jq .workspace diff --git a/acceptance/bundle/variables/resolve-empty/databricks.yml b/acceptance/bundle/variables/resolve-empty/databricks.yml new file mode 100644 index 000000000..7563ada34 --- /dev/null +++ b/acceptance/bundle/variables/resolve-empty/databricks.yml @@ -0,0 +1,10 @@ +bundle: + name: TestResolveVariableReferencesToEmptyFields + git: + branch: "" + +resources: + jobs: + job1: + tags: + git_branch: "${bundle.git.branch}" diff --git a/acceptance/bundle/variables/resolve-empty/output.txt b/acceptance/bundle/variables/resolve-empty/output.txt new file mode 100644 index 000000000..a05cbbf54 --- /dev/null +++ b/acceptance/bundle/variables/resolve-empty/output.txt @@ -0,0 +1,3 @@ +{ + "git_branch": "" +} diff --git a/acceptance/bundle/variables/resolve-empty/script b/acceptance/bundle/variables/resolve-empty/script new file mode 100644 index 000000000..614673054 --- /dev/null +++ b/acceptance/bundle/variables/resolve-empty/script @@ -0,0 +1 @@ +$CLI bundle validate -o json | jq .resources.jobs.job1.tags diff --git a/acceptance/bundle/variables/resolve-field-within-complex/databricks.yml b/acceptance/bundle/variables/resolve-field-within-complex/databricks.yml new file mode 100644 index 000000000..7250dd5df --- /dev/null +++ b/acceptance/bundle/variables/resolve-field-within-complex/databricks.yml @@ -0,0 +1,16 @@ +bundle: + name: TestResolveComplexVariableReferencesToFields + +variables: + cluster: + type: "complex" + default: + node_type_id: "Standard_DS3_v2" + num_workers: 2 + +resources: + jobs: + job1: + job_clusters: + - new_cluster: + node_type_id: "${var.cluster.node_type_id}" diff --git a/acceptance/bundle/variables/resolve-field-within-complex/output.txt b/acceptance/bundle/variables/resolve-field-within-complex/output.txt new file mode 100644 index 000000000..1f6bdbbf4 --- /dev/null +++ b/acceptance/bundle/variables/resolve-field-within-complex/output.txt @@ -0,0 +1,3 @@ +{ + "node_type_id": "Standard_DS3_v2" +} diff --git a/acceptance/bundle/variables/resolve-field-within-complex/script b/acceptance/bundle/variables/resolve-field-within-complex/script new file mode 100644 index 000000000..a885870a5 --- /dev/null +++ b/acceptance/bundle/variables/resolve-field-within-complex/script @@ -0,0 +1 @@ +$CLI bundle validate -o json | jq .resources.jobs.job1.job_clusters[0].new_cluster diff --git a/bundle/tests/variables/vanilla/databricks.yml b/acceptance/bundle/variables/vanilla/databricks.yml similarity index 100% rename from bundle/tests/variables/vanilla/databricks.yml rename to acceptance/bundle/variables/vanilla/databricks.yml diff --git a/acceptance/bundle/variables/vanilla/output.txt b/acceptance/bundle/variables/vanilla/output.txt new file mode 100644 index 000000000..69b358a3f --- 
/dev/null +++ b/acceptance/bundle/variables/vanilla/output.txt @@ -0,0 +1,16 @@ + +>>> BUNDLE_VAR_b=def $CLI bundle validate -o json +"abc def" + +>>> errcode $CLI bundle validate +Error: no value assigned to required variable b. Assignment can be done through the "--var" flag or by setting the BUNDLE_VAR_b environment variable + +Name: ${var.a} ${var.b} +Target: default +Workspace: + User: tester@databricks.com + Path: /Workspace/Users/tester@databricks.com/.bundle/${var.a} ${var.b}/default + +Found 1 error + +Exit code: 1 diff --git a/acceptance/bundle/variables/vanilla/script b/acceptance/bundle/variables/vanilla/script new file mode 100644 index 000000000..10da3183d --- /dev/null +++ b/acceptance/bundle/variables/vanilla/script @@ -0,0 +1,2 @@ +trace BUNDLE_VAR_b=def $CLI bundle validate -o json | jq .bundle.name +trace errcode $CLI bundle validate diff --git a/bundle/tests/variables/variable_overrides_in_target/databricks.yml b/acceptance/bundle/variables/variable_overrides_in_target/databricks.yml similarity index 100% rename from bundle/tests/variables/variable_overrides_in_target/databricks.yml rename to acceptance/bundle/variables/variable_overrides_in_target/databricks.yml diff --git a/acceptance/bundle/variables/variable_overrides_in_target/output.txt b/acceptance/bundle/variables/variable_overrides_in_target/output.txt new file mode 100644 index 000000000..de193f5b6 --- /dev/null +++ b/acceptance/bundle/variables/variable_overrides_in_target/output.txt @@ -0,0 +1,84 @@ + +>>> $CLI bundle validate -o json -t use-default-variable-values +{ + "pipelines": { + "my_pipeline": { + "clusters": [ + { + "label": "default", + "num_workers": 42 + } + ], + "continuous": true, + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/foobar/use-default-variable-values/state/metadata.json" + }, + "name": "a_string", + "permissions": [] + } + } +} + +>>> $CLI bundle validate -o json -t override-string-variable +{ + "pipelines": { + "my_pipeline": { + "clusters": [ + { + "label": "default", + "num_workers": 42 + } + ], + "continuous": true, + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/foobar/override-string-variable/state/metadata.json" + }, + "name": "overridden_string", + "permissions": [] + } + } +} + +>>> $CLI bundle validate -o json -t override-int-variable +{ + "pipelines": { + "my_pipeline": { + "clusters": [ + { + "label": "default", + "num_workers": 43 + } + ], + "continuous": true, + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/foobar/override-int-variable/state/metadata.json" + }, + "name": "a_string", + "permissions": [] + } + } +} + +>>> $CLI bundle validate -o json -t override-both-bool-and-string-variables +{ + "pipelines": { + "my_pipeline": { + "clusters": [ + { + "label": "default", + "num_workers": 42 + } + ], + "continuous": false, + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/tester@databricks.com/.bundle/foobar/override-both-bool-and-string-variables/state/metadata.json" + }, + "name": "overridden_string", + "permissions": [] + } + } +} diff --git a/acceptance/bundle/variables/variable_overrides_in_target/script b/acceptance/bundle/variables/variable_overrides_in_target/script new file mode 100644 index 000000000..686b3102a --- /dev/null +++ b/acceptance/bundle/variables/variable_overrides_in_target/script @@ -0,0 +1,4 @@ +trace $CLI bundle validate -o json -t 
use-default-variable-values | jq .resources +trace $CLI bundle validate -o json -t override-string-variable | jq .resources +trace $CLI bundle validate -o json -t override-int-variable | jq .resources +trace $CLI bundle validate -o json -t override-both-bool-and-string-variables | jq .resources diff --git a/bundle/tests/variables/without_definition/databricks.yml b/acceptance/bundle/variables/without_definition/databricks.yml similarity index 53% rename from bundle/tests/variables/without_definition/databricks.yml rename to acceptance/bundle/variables/without_definition/databricks.yml index 68227b683..c26a85f56 100644 --- a/bundle/tests/variables/without_definition/databricks.yml +++ b/acceptance/bundle/variables/without_definition/databricks.yml @@ -1,3 +1,5 @@ +bundle: + name: x variables: a: b: diff --git a/acceptance/bundle/variables/without_definition/output.txt b/acceptance/bundle/variables/without_definition/output.txt new file mode 100644 index 000000000..4dd1e6609 --- /dev/null +++ b/acceptance/bundle/variables/without_definition/output.txt @@ -0,0 +1,4 @@ +{ + "a": "foo", + "b": "bar" +} diff --git a/acceptance/bundle/variables/without_definition/script b/acceptance/bundle/variables/without_definition/script new file mode 100644 index 000000000..49b9b5448 --- /dev/null +++ b/acceptance/bundle/variables/without_definition/script @@ -0,0 +1 @@ +BUNDLE_VAR_a=foo BUNDLE_VAR_b=bar $CLI bundle validate -o json | jq '.variables | map_values(.value)' diff --git a/acceptance/help/output.txt b/acceptance/help/output.txt new file mode 100644 index 000000000..ed4a88ce6 --- /dev/null +++ b/acceptance/help/output.txt @@ -0,0 +1,143 @@ +Databricks CLI + +Usage: + databricks [command] + +Databricks Workspace + fs Filesystem related commands + git-credentials Registers personal access token for Databricks to do operations on behalf of the user. + repos The Repos API allows users to manage their git repos. + secrets The Secrets API allows you to manage secrets, secret scopes, and access permissions. + workspace The Workspace API allows you to list, import, export, and delete notebooks and folders. + +Compute + cluster-policies You can use cluster policies to control users' ability to configure clusters based on a set of rules. + clusters The Clusters API allows you to create, start, edit, list, terminate, and delete clusters. + global-init-scripts The Global Init Scripts API enables Workspace administrators to configure global initialization scripts for their workspace. + instance-pools Instance Pools API are used to create, edit, delete and list instance pools by using ready-to-use cloud instances which reduces a cluster start and auto-scaling times. + instance-profiles The Instance Profiles API allows admins to add, list, and remove instance profiles that users can launch clusters with. + libraries The Libraries API allows you to install and uninstall libraries and get the status of libraries on a cluster. + policy-compliance-for-clusters The policy compliance APIs allow you to view and manage the policy compliance status of clusters in your workspace. + policy-families View available policy families. + +Workflows + jobs The Jobs API allows you to create, edit, and delete jobs. + policy-compliance-for-jobs The compliance APIs allow you to view and manage the policy compliance status of jobs in your workspace. + +Delta Live Tables + pipelines The Delta Live Tables API allows you to create, edit, delete, start, and view details about pipelines. 
+ +Machine Learning + experiments Experiments are the primary unit of organization in MLflow; all MLflow runs belong to an experiment. + model-registry Note: This API reference documents APIs for the Workspace Model Registry. + +Real-time Serving + serving-endpoints The Serving Endpoints API allows you to create, update, and delete model serving endpoints. + +Identity and Access Management + current-user This API allows retrieving information about currently authenticated user or service principal. + groups Groups simplify identity management, making it easier to assign access to Databricks workspace, data, and other securable objects. + permissions Permissions API are used to create read, write, edit, update and manage access for various users on different objects and endpoints. + service-principals Identities for use with jobs, automated tools, and systems such as scripts, apps, and CI/CD platforms. + users User identities recognized by Databricks and represented by email addresses. + +Databricks SQL + alerts The alerts API can be used to perform CRUD operations on alerts. + alerts-legacy The alerts API can be used to perform CRUD operations on alerts. + dashboards In general, there is little need to modify dashboards using the API. + data-sources This API is provided to assist you in making new query objects. + queries The queries API can be used to perform CRUD operations on queries. + queries-legacy These endpoints are used for CRUD operations on query definitions. + query-history A service responsible for storing and retrieving the list of queries run against SQL endpoints and serverless compute. + warehouses A SQL warehouse is a compute resource that lets you run SQL commands on data objects within Databricks SQL. + +Unity Catalog + artifact-allowlists In Databricks Runtime 13.3 and above, you can add libraries and init scripts to the allowlist in UC so that users can leverage these artifacts on compute configured with shared access mode. + catalogs A catalog is the first layer of Unity Catalog’s three-level namespace. + connections Connections allow for creating a connection to an external data source. + credentials A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant. + external-locations An external location is an object that combines a cloud storage path with a storage credential that authorizes access to the cloud storage path. + functions Functions implement User-Defined Functions (UDFs) in Unity Catalog. + grants In Unity Catalog, data is secure by default. + metastores A metastore is the top-level container of objects in Unity Catalog. + model-versions Databricks provides a hosted version of MLflow Model Registry in Unity Catalog. + online-tables Online tables provide lower latency and higher QPS access to data from Delta tables. + quality-monitors A monitor computes and monitors data or model quality metrics for a table over time. + registered-models Databricks provides a hosted version of MLflow Model Registry in Unity Catalog. + resource-quotas Unity Catalog enforces resource quotas on all securable objects, which limits the number of resources that can be created. + schemas A schema (also called a database) is the second layer of Unity Catalog’s three-level namespace. + storage-credentials A storage credential represents an authentication and authorization mechanism for accessing data stored on your cloud tenant. + system-schemas A system schema is a schema that lives within the system catalog. 
+ table-constraints Primary key and foreign key constraints encode relationships between fields in tables. + tables A table resides in the third layer of Unity Catalog’s three-level namespace. + temporary-table-credentials Temporary Table Credentials refer to short-lived, downscoped credentials used to access cloud storage locationswhere table data is stored in Databricks. + volumes Volumes are a Unity Catalog (UC) capability for accessing, storing, governing, organizing and processing files. + workspace-bindings A securable in Databricks can be configured as __OPEN__ or __ISOLATED__. + +Delta Sharing + providers A data provider is an object representing the organization in the real world who shares the data. + recipient-activation The Recipient Activation API is only applicable in the open sharing model where the recipient object has the authentication type of TOKEN. + recipients A recipient is an object you create using :method:recipients/create to represent an organization which you want to allow access shares. + shares A share is a container instantiated with :method:shares/create. + +Settings + ip-access-lists IP Access List enables admins to configure IP access lists. + notification-destinations The notification destinations API lets you programmatically manage a workspace's notification destinations. + settings Workspace Settings API allows users to manage settings at the workspace level. + token-management Enables administrators to get all tokens and delete tokens for other users. + tokens The Token API allows you to create, list, and revoke tokens that can be used to authenticate and access Databricks REST APIs. + workspace-conf This API allows updating known workspace settings for advanced users. + +Developer Tools + bundle Databricks Asset Bundles let you express data/AI/analytics projects as code. + sync Synchronize a local directory to a workspace directory + +Vector Search + vector-search-endpoints **Endpoint**: Represents the compute resources to host vector search indexes. + vector-search-indexes **Index**: An efficient representation of your embedding vectors that supports real-time and efficient approximate nearest neighbor (ANN) search queries. + +Dashboards + lakeview These APIs provide specific management operations for Lakeview dashboards. + +Marketplace + consumer-fulfillments Fulfillments are entities that allow consumers to preview installations. + consumer-installations Installations are entities that allow consumers to interact with Databricks Marketplace listings. + consumer-listings Listings are the core entities in the Marketplace. + consumer-personalization-requests Personalization Requests allow customers to interact with the individualized Marketplace listing flow. + consumer-providers Providers are the entities that publish listings to the Marketplace. + provider-exchange-filters Marketplace exchanges filters curate which groups can access an exchange. + provider-exchanges Marketplace exchanges allow providers to share their listings with a curated set of customers. + provider-files Marketplace offers a set of file APIs for various purposes such as preview notebooks and provider icons. + provider-listings Listings are the core entities in the Marketplace. + provider-personalization-requests Personalization requests are an alternate to instantly available listings. + provider-provider-analytics-dashboards Manage templated analytics solution for providers. + provider-providers Providers are entities that manage assets in Marketplace. 
+ +Apps + apps Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on. + apps Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on. + +Clean Rooms + clean-room-assets Clean room assets are data and code objects — Tables, volumes, and notebooks that are shared with the clean room. + clean-room-task-runs Clean room task runs are the executions of notebooks in a clean room. + clean-rooms A clean room uses Delta Sharing and serverless compute to provide a secure and privacy-protecting environment where multiple parties can work together on sensitive enterprise data without direct access to each other’s data. + +Additional Commands: + account Databricks Account Commands + api Perform Databricks API call + auth Authentication related commands + completion Generate the autocompletion script for the specified shell + configure Configure authentication + help Help about any command + labs Manage Databricks Labs installations + version Retrieve information about the current version of this CLI + +Flags: + --debug enable debug logging + -h, --help help for databricks + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + -v, --version version for databricks + +Use "databricks [command] --help" for more information about a command. diff --git a/acceptance/help/script b/acceptance/help/script new file mode 100644 index 000000000..5fa569470 --- /dev/null +++ b/acceptance/help/script @@ -0,0 +1 @@ +$CLI diff --git a/acceptance/script.cleanup b/acceptance/script.cleanup new file mode 100644 index 000000000..3c3e29ebc --- /dev/null +++ b/acceptance/script.cleanup @@ -0,0 +1 @@ +rm -fr .databricks .gitignore diff --git a/acceptance/script.prepare b/acceptance/script.prepare new file mode 100644 index 000000000..3f1bb2acc --- /dev/null +++ b/acceptance/script.prepare @@ -0,0 +1,36 @@ +# Prevent CLI from downloading terraform in each test: +export DATABRICKS_TF_EXEC_PATH=/tmp/ + +errcode() { + # Temporarily disable 'set -e' to prevent the script from exiting on error + set +e + # Execute the provided command with all arguments + "$@" + local exit_code=$? + # Re-enable 'set -e' if it was previously set + set -e + >&2 printf "\nExit code: $exit_code\n" +} + +trace() { + >&2 printf "\n>>> %s\n" "$*" + + if [[ "$1" == *"="* ]]; then + # If the first argument contains '=', collect all env vars + local env_vars=() + while [[ "$1" == *"="* ]]; do + env_vars+=("$1") + shift + done + # Export environment variables in a subshell and execute the command + ( + export "${env_vars[@]}" + "$@" + ) + else + # Execute the command normally + "$@" + fi + + return $? 
+} diff --git a/acceptance/server_test.go b/acceptance/server_test.go new file mode 100644 index 000000000..7b21e198f --- /dev/null +++ b/acceptance/server_test.go @@ -0,0 +1,129 @@ +package acceptance_test + +import ( + "encoding/json" + "net" + "net/http" + "net/http/httptest" + "testing" + + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/databricks/databricks-sdk-go/service/workspace" +) + +type TestServer struct { + *httptest.Server + Mux *http.ServeMux + Port int +} + +type HandlerFunc func(r *http.Request) (any, error) + +func NewTestServer() *TestServer { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + port := server.Listener.Addr().(*net.TCPAddr).Port + + return &TestServer{ + Server: server, + Mux: mux, + Port: port, + } +} + +func (s *TestServer) Handle(pattern string, handler HandlerFunc) { + s.Mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) { + resp, err := handler(r) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + + var respBytes []byte + + respString, ok := resp.(string) + if ok { + respBytes = []byte(respString) + } else { + respBytes, err = json.MarshalIndent(resp, "", " ") + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } + + if _, err := w.Write(respBytes); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + }) +} + +func StartServer(t *testing.T) *TestServer { + server := NewTestServer() + t.Cleanup(func() { + server.Close() + }) + return server +} + +func AddHandlers(server *TestServer) { + server.Handle("/api/2.0/policies/clusters/list", func(r *http.Request) (any, error) { + return compute.ListPoliciesResponse{ + Policies: []compute.Policy{ + { + PolicyId: "5678", + Name: "wrong-cluster-policy", + }, + { + PolicyId: "9876", + Name: "some-test-cluster-policy", + }, + }, + }, nil + }) + + server.Handle("/api/2.0/instance-pools/list", func(r *http.Request) (any, error) { + return compute.ListInstancePools{ + InstancePools: []compute.InstancePoolAndStats{ + { + InstancePoolName: "some-test-instance-pool", + InstancePoolId: "1234", + }, + }, + }, nil + }) + + server.Handle("/api/2.1/clusters/list", func(r *http.Request) (any, error) { + return compute.ListClustersResponse{ + Clusters: []compute.ClusterDetails{ + { + ClusterName: "some-test-cluster", + ClusterId: "4321", + }, + { + ClusterName: "some-other-cluster", + ClusterId: "9876", + }, + }, + }, nil + }) + + server.Handle("/api/2.0/preview/scim/v2/Me", func(r *http.Request) (any, error) { + return iam.User{ + UserName: "tester@databricks.com", + }, nil + }) + + server.Handle("/api/2.0/workspace/get-status", func(r *http.Request) (any, error) { + return workspace.ObjectInfo{ + ObjectId: 1001, + ObjectType: "DIRECTORY", + Path: "", + ResourceId: "1001", + }, nil + }) +} diff --git a/bundle/artifacts/all.go b/bundle/artifacts/all.go index 305193e2e..768ccdfe3 100644 --- a/bundle/artifacts/all.go +++ b/bundle/artifacts/all.go @@ -3,7 +3,6 @@ package artifacts import ( "context" "fmt" - "slices" "github.com/databricks/cli/bundle" diff --git a/bundle/artifacts/autodetect.go b/bundle/artifacts/autodetect.go index 569a480f0..c8d235616 100644 --- a/bundle/artifacts/autodetect.go +++ b/bundle/artifacts/autodetect.go @@ -13,8 +13,7 @@ func DetectPackages() bundle.Mutator { return &autodetect{} } -type autodetect struct { -} +type 
autodetect struct{} func (m *autodetect) Name() string { return "artifacts.DetectPackages" diff --git a/bundle/artifacts/expand_globs.go b/bundle/artifacts/expand_globs.go index cdf3d4590..c0af7c69e 100644 --- a/bundle/artifacts/expand_globs.go +++ b/bundle/artifacts/expand_globs.go @@ -96,9 +96,8 @@ func (m *expandGlobs) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnost // Set the expanded globs back into the configuration. return dyn.SetByPath(v, base, dyn.V(output)) }) - if err != nil { - return diag.FromErr(err) + diags = diags.Extend(diag.FromErr(err)) } return diags diff --git a/bundle/artifacts/expand_globs_test.go b/bundle/artifacts/expand_globs_test.go index dc7c77de7..264c52c50 100644 --- a/bundle/artifacts/expand_globs_test.go +++ b/bundle/artifacts/expand_globs_test.go @@ -2,7 +2,6 @@ package artifacts import ( "context" - "fmt" "path/filepath" "testing" @@ -88,16 +87,16 @@ func TestExpandGlobs_InvalidPattern(t *testing.T) { )) assert.Len(t, diags, 4) - assert.Equal(t, fmt.Sprintf("%s: syntax error in pattern", filepath.Clean("a[.txt")), diags[0].Summary) + assert.Equal(t, filepath.Clean("a[.txt")+": syntax error in pattern", diags[0].Summary) assert.Equal(t, filepath.Join(tmpDir, "databricks.yml"), diags[0].Locations[0].File) assert.Equal(t, "artifacts.test.files[0].source", diags[0].Paths[0].String()) - assert.Equal(t, fmt.Sprintf("%s: syntax error in pattern", filepath.Clean("a[.txt")), diags[1].Summary) + assert.Equal(t, filepath.Clean("a[.txt")+": syntax error in pattern", diags[1].Summary) assert.Equal(t, filepath.Join(tmpDir, "databricks.yml"), diags[1].Locations[0].File) assert.Equal(t, "artifacts.test.files[1].source", diags[1].Paths[0].String()) - assert.Equal(t, fmt.Sprintf("%s: syntax error in pattern", filepath.Clean("../a[.txt")), diags[2].Summary) + assert.Equal(t, filepath.Clean("../a[.txt")+": syntax error in pattern", diags[2].Summary) assert.Equal(t, filepath.Join(tmpDir, "databricks.yml"), diags[2].Locations[0].File) assert.Equal(t, "artifacts.test.files[2].source", diags[2].Paths[0].String()) - assert.Equal(t, fmt.Sprintf("%s: syntax error in pattern", filepath.Clean("subdir/a[.txt")), diags[3].Summary) + assert.Equal(t, filepath.Clean("subdir/a[.txt")+": syntax error in pattern", diags[3].Summary) assert.Equal(t, filepath.Join(tmpDir, "databricks.yml"), diags[3].Locations[0].File) assert.Equal(t, "artifacts.test.files[3].source", diags[3].Paths[0].String()) } diff --git a/bundle/artifacts/whl/autodetect.go b/bundle/artifacts/whl/autodetect.go index 88dc742c1..202ea12bc 100644 --- a/bundle/artifacts/whl/autodetect.go +++ b/bundle/artifacts/whl/autodetect.go @@ -15,8 +15,7 @@ import ( "github.com/databricks/cli/libs/log" ) -type detectPkg struct { -} +type detectPkg struct{} func DetectPackage() bundle.Mutator { return &detectPkg{} @@ -42,7 +41,7 @@ func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic return nil } - log.Infof(ctx, fmt.Sprintf("Found Python wheel project at %s", b.BundleRootPath)) + log.Infof(ctx, "Found Python wheel project at %s", b.BundleRootPath) module := extractModuleName(setupPy) if b.Config.Artifacts == nil { diff --git a/bundle/artifacts/whl/infer.go b/bundle/artifacts/whl/infer.go index cb727de0e..9c40360be 100644 --- a/bundle/artifacts/whl/infer.go +++ b/bundle/artifacts/whl/infer.go @@ -16,12 +16,6 @@ type infer struct { func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { artifact := b.Config.Artifacts[m.name] - // TODO use python.DetectVEnvExecutable once bundle has 
a way to specify venv path - py, err := python.DetectExecutable(ctx) - if err != nil { - return diag.FromErr(err) - } - // Note: using --build-number (build tag) flag does not help with re-installing // libraries on all-purpose clusters. The reason is that `pip` ignoring build tag // when upgrading the library and only look at wheel version. @@ -36,7 +30,9 @@ func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // version=datetime.datetime.utcnow().strftime("%Y%m%d.%H%M%S"), // ... //) - artifact.BuildCommand = fmt.Sprintf(`"%s" setup.py bdist_wheel`, py) + + py := python.GetExecutable() + artifact.BuildCommand = py + " setup.py bdist_wheel" return nil } diff --git a/bundle/bundle.go b/bundle/bundle.go index 41f12b27a..3bf4ffb62 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -8,6 +8,7 @@ package bundle import ( "context" + "errors" "fmt" "os" "path/filepath" @@ -189,7 +190,7 @@ func (b *Bundle) CacheDir(ctx context.Context, paths ...string) (string, error) // Make directory if it doesn't exist yet. dir := filepath.Join(parts...) - err := os.MkdirAll(dir, 0700) + err := os.MkdirAll(dir, 0o700) if err != nil { return "", err } @@ -206,7 +207,7 @@ func (b *Bundle) InternalDir(ctx context.Context) (string, error) { } dir := filepath.Join(cacheDir, internalFolder) - err = os.MkdirAll(dir, 0700) + err = os.MkdirAll(dir, 0o700) if err != nil { return dir, err } @@ -237,7 +238,7 @@ func (b *Bundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) { // we call into from this bundle context. func (b *Bundle) AuthEnv() (map[string]string, error) { if b.client == nil { - return nil, fmt.Errorf("workspace client not initialized yet") + return nil, errors.New("workspace client not initialized yet") } cfg := b.client.Config diff --git a/bundle/bundle_test.go b/bundle/bundle_test.go index 1c3102357..d52088e34 100644 --- a/bundle/bundle_test.go +++ b/bundle/bundle_test.go @@ -2,7 +2,6 @@ package bundle import ( "context" - "errors" "io/fs" "os" "path/filepath" @@ -16,7 +15,7 @@ import ( func TestLoadNotExists(t *testing.T) { b, err := Load(context.Background(), "/doesntexist") - assert.True(t, errors.Is(err, fs.ErrNotExist)) + assert.ErrorIs(t, err, fs.ErrNotExist) assert.Nil(t, b) } diff --git a/bundle/config/artifact.go b/bundle/config/artifact.go index 9a5690f57..177799e11 100644 --- a/bundle/config/artifact.go +++ b/bundle/config/artifact.go @@ -2,7 +2,7 @@ package config import ( "context" - "fmt" + "errors" "github.com/databricks/cli/libs/exec" ) @@ -37,7 +37,7 @@ type Artifact struct { func (a *Artifact) Build(ctx context.Context) ([]byte, error) { if a.BuildCommand == "" { - return nil, fmt.Errorf("no build property defined") + return nil, errors.New("no build property defined") } var e *exec.Executor diff --git a/bundle/config/experimental.go b/bundle/config/experimental.go index 061bbdae0..7ecac5d7d 100644 --- a/bundle/config/experimental.go +++ b/bundle/config/experimental.go @@ -27,9 +27,33 @@ type Experimental struct { // PyDABs determines whether to load the 'databricks-pydabs' package. // // PyDABs allows to define bundle configuration using Python. + // PyDABs is deprecated use Python instead. PyDABs PyDABs `json:"pydabs,omitempty"` + + // Python configures loading of Python code defined with 'databricks-bundles' package. + Python Python `json:"python,omitempty"` } +type Python struct { + // Resources contains a list of fully qualified function paths to load resources + // defined in Python code. 
+ // + // Example: ["my_project.resources:load_resources"] + Resources []string `json:"resources"` + + // Mutators contains a list of fully qualified function paths to mutator functions. + // + // Example: ["my_project.mutators:add_default_cluster"] + Mutators []string `json:"mutators"` + + // VEnvPath is path to the virtual environment. + // + // If enabled, Python code will execute within this environment. If disabled, + // it defaults to using the Python interpreter available in the current shell. + VEnvPath string `json:"venv_path,omitempty"` +} + +// PyDABs is deprecated use Python instead type PyDABs struct { // Enabled is a flag to enable the feature. Enabled bool `json:"enabled,omitempty"` @@ -47,8 +71,10 @@ type PyDABs struct { Import []string `json:"import,omitempty"` } -type Command string -type ScriptHook string +type ( + Command string + ScriptHook string +) // These hook names are subject to change and currently experimental const ( diff --git a/bundle/config/generate/job.go b/bundle/config/generate/job.go index 6cd7c1b32..0cdcbf3ad 100644 --- a/bundle/config/generate/job.go +++ b/bundle/config/generate/job.go @@ -6,8 +6,10 @@ import ( "github.com/databricks/databricks-sdk-go/service/jobs" ) -var jobOrder = yamlsaver.NewOrder([]string{"name", "job_clusters", "compute", "tasks"}) -var taskOrder = yamlsaver.NewOrder([]string{"task_key", "depends_on", "existing_cluster_id", "new_cluster", "job_cluster_key"}) +var ( + jobOrder = yamlsaver.NewOrder([]string{"name", "job_clusters", "compute", "tasks"}) + taskOrder = yamlsaver.NewOrder([]string{"task_key", "depends_on", "existing_cluster_id", "new_cluster", "job_cluster_key"}) +) func ConvertJobToValue(job *jobs.Job) (dyn.Value, error) { value := make(map[string]dyn.Value) diff --git a/bundle/config/loader/process_root_includes.go b/bundle/config/loader/process_root_includes.go index c14fb7ce1..c608a3de6 100644 --- a/bundle/config/loader/process_root_includes.go +++ b/bundle/config/loader/process_root_includes.go @@ -27,7 +27,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag. var out []bundle.Mutator // Map with files we've already seen to avoid loading them twice. 
- var seen = map[string]bool{} + seen := map[string]bool{} for _, file := range config.FileNames { seen[file] = true diff --git a/bundle/config/mutator/apply_presets.go b/bundle/config/mutator/apply_presets.go index 381703756..59b8547be 100644 --- a/bundle/config/mutator/apply_presets.go +++ b/bundle/config/mutator/apply_presets.go @@ -9,7 +9,6 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/libs/dbr" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/textutil" @@ -222,27 +221,6 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos dashboard.DisplayName = prefix + dashboard.DisplayName } - if config.IsExplicitlyEnabled((b.Config.Presets.SourceLinkedDeployment)) { - isDatabricksWorkspace := dbr.RunsOnRuntime(ctx) && strings.HasPrefix(b.SyncRootPath, "/Workspace/") - if !isDatabricksWorkspace { - target := b.Config.Bundle.Target - path := dyn.NewPath(dyn.Key("targets"), dyn.Key(target), dyn.Key("presets"), dyn.Key("source_linked_deployment")) - diags = diags.Append( - diag.Diagnostic{ - Severity: diag.Warning, - Summary: "source-linked deployment is available only in the Databricks Workspace", - Paths: []dyn.Path{ - path, - }, - Locations: b.Config.GetLocations(path[2:].String()), - }, - ) - - disabled := false - b.Config.Presets.SourceLinkedDeployment = &disabled - } - } - return diags } diff --git a/bundle/config/mutator/apply_presets_test.go b/bundle/config/mutator/apply_presets_test.go index 91d5b62e5..5e3f942cc 100644 --- a/bundle/config/mutator/apply_presets_test.go +++ b/bundle/config/mutator/apply_presets_test.go @@ -2,16 +2,12 @@ package mutator_test import ( "context" - "runtime" "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/config/resources" - "github.com/databricks/cli/bundle/internal/bundletest" - "github.com/databricks/cli/libs/dbr" - "github.com/databricks/cli/libs/dyn" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/require" @@ -398,88 +394,3 @@ func TestApplyPresetsResourceNotDefined(t *testing.T) { }) } } - -func TestApplyPresetsSourceLinkedDeployment(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("this test is not applicable on Windows because source-linked mode works only in the Databricks Workspace") - } - - testContext := context.Background() - enabled := true - disabled := false - workspacePath := "/Workspace/user.name@company.com" - - tests := []struct { - bundlePath string - ctx context.Context - name string - initialValue *bool - expectedValue *bool - expectedWarning string - }{ - { - name: "preset enabled, bundle in Workspace, databricks runtime", - bundlePath: workspacePath, - ctx: dbr.MockRuntime(testContext, true), - initialValue: &enabled, - expectedValue: &enabled, - }, - { - name: "preset enabled, bundle not in Workspace, databricks runtime", - bundlePath: "/Users/user.name@company.com", - ctx: dbr.MockRuntime(testContext, true), - initialValue: &enabled, - expectedValue: &disabled, - expectedWarning: "source-linked deployment is available only in the Databricks Workspace", - }, - { - name: "preset enabled, bundle in Workspace, not databricks runtime", - bundlePath: workspacePath, - ctx: dbr.MockRuntime(testContext, false), - initialValue: &enabled, - expectedValue: &disabled, - 
expectedWarning: "source-linked deployment is available only in the Databricks Workspace", - }, - { - name: "preset disabled, bundle in Workspace, databricks runtime", - bundlePath: workspacePath, - ctx: dbr.MockRuntime(testContext, true), - initialValue: &disabled, - expectedValue: &disabled, - }, - { - name: "preset nil, bundle in Workspace, databricks runtime", - bundlePath: workspacePath, - ctx: dbr.MockRuntime(testContext, true), - initialValue: nil, - expectedValue: nil, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - b := &bundle.Bundle{ - SyncRootPath: tt.bundlePath, - Config: config.Root{ - Presets: config.Presets{ - SourceLinkedDeployment: tt.initialValue, - }, - }, - } - - bundletest.SetLocation(b, "presets.source_linked_deployment", []dyn.Location{{File: "databricks.yml"}}) - diags := bundle.Apply(tt.ctx, b, mutator.ApplyPresets()) - if diags.HasError() { - t.Fatalf("unexpected error: %v", diags) - } - - if tt.expectedWarning != "" { - require.Equal(t, tt.expectedWarning, diags[0].Summary) - require.NotEmpty(t, diags[0].Locations) - } - - require.Equal(t, tt.expectedValue, b.Config.Presets.SourceLinkedDeployment) - }) - } - -} diff --git a/bundle/config/mutator/apply_source_linked_deployment_preset.go b/bundle/config/mutator/apply_source_linked_deployment_preset.go new file mode 100644 index 000000000..78ccc5322 --- /dev/null +++ b/bundle/config/mutator/apply_source_linked_deployment_preset.go @@ -0,0 +1,75 @@ +package mutator + +import ( + "context" + "strings" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/dbr" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" +) + +type applySourceLinkedDeploymentPreset struct{} + +// Apply source-linked deployment preset +func ApplySourceLinkedDeploymentPreset() *applySourceLinkedDeploymentPreset { + return &applySourceLinkedDeploymentPreset{} +} + +func (m *applySourceLinkedDeploymentPreset) Name() string { + return "ApplySourceLinkedDeploymentPreset" +} + +func (m *applySourceLinkedDeploymentPreset) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + if config.IsExplicitlyDisabled(b.Config.Presets.SourceLinkedDeployment) { + return nil + } + + var diags diag.Diagnostics + isDatabricksWorkspace := dbr.RunsOnRuntime(ctx) && strings.HasPrefix(b.SyncRootPath, "/Workspace/") + target := b.Config.Bundle.Target + + if config.IsExplicitlyEnabled((b.Config.Presets.SourceLinkedDeployment)) { + if !isDatabricksWorkspace { + path := dyn.NewPath(dyn.Key("targets"), dyn.Key(target), dyn.Key("presets"), dyn.Key("source_linked_deployment")) + diags = diags.Append( + diag.Diagnostic{ + Severity: diag.Warning, + Summary: "source-linked deployment is available only in the Databricks Workspace", + Paths: []dyn.Path{ + path, + }, + Locations: b.Config.GetLocations(path[2:].String()), + }, + ) + + disabled := false + b.Config.Presets.SourceLinkedDeployment = &disabled + return diags + } + } + + if isDatabricksWorkspace && b.Config.Bundle.Mode == config.Development { + enabled := true + b.Config.Presets.SourceLinkedDeployment = &enabled + } + + if b.Config.Workspace.FilePath != "" && config.IsExplicitlyEnabled(b.Config.Presets.SourceLinkedDeployment) { + path := dyn.NewPath(dyn.Key("targets"), dyn.Key(target), dyn.Key("workspace"), dyn.Key("file_path")) + + diags = diags.Append( + diag.Diagnostic{ + Severity: diag.Warning, + Summary: "workspace.file_path setting will be ignored in source-linked deployment mode", + 
Paths: []dyn.Path{ + path[2:], + }, + Locations: b.Config.GetLocations(path[2:].String()), + }, + ) + } + + return diags +} diff --git a/bundle/config/mutator/apply_source_linked_deployment_preset_test.go b/bundle/config/mutator/apply_source_linked_deployment_preset_test.go new file mode 100644 index 000000000..1b74fd8e9 --- /dev/null +++ b/bundle/config/mutator/apply_source_linked_deployment_preset_test.go @@ -0,0 +1,122 @@ +package mutator_test + +import ( + "context" + "runtime" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/internal/bundletest" + "github.com/databricks/cli/libs/dbr" + "github.com/databricks/cli/libs/dyn" + "github.com/stretchr/testify/require" +) + +func TestApplyPresetsSourceLinkedDeployment(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("this test is not applicable on Windows because source-linked mode works only in the Databricks Workspace") + } + + testContext := context.Background() + enabled := true + disabled := false + workspacePath := "/Workspace/user.name@company.com" + + tests := []struct { + name string + ctx context.Context + mutateBundle func(b *bundle.Bundle) + initialValue *bool + expectedValue *bool + expectedWarning string + }{ + { + name: "preset enabled, bundle in Workspace, databricks runtime", + ctx: dbr.MockRuntime(testContext, true), + initialValue: &enabled, + expectedValue: &enabled, + }, + { + name: "preset enabled, bundle not in Workspace, databricks runtime", + ctx: dbr.MockRuntime(testContext, true), + mutateBundle: func(b *bundle.Bundle) { + b.SyncRootPath = "/Users/user.name@company.com" + }, + initialValue: &enabled, + expectedValue: &disabled, + expectedWarning: "source-linked deployment is available only in the Databricks Workspace", + }, + { + name: "preset enabled, bundle in Workspace, not databricks runtime", + ctx: dbr.MockRuntime(testContext, false), + initialValue: &enabled, + expectedValue: &disabled, + expectedWarning: "source-linked deployment is available only in the Databricks Workspace", + }, + { + name: "preset disabled, bundle in Workspace, databricks runtime", + ctx: dbr.MockRuntime(testContext, true), + initialValue: &disabled, + expectedValue: &disabled, + }, + { + name: "preset nil, bundle in Workspace, databricks runtime", + ctx: dbr.MockRuntime(testContext, true), + initialValue: nil, + expectedValue: nil, + }, + { + name: "preset nil, dev mode true, bundle in Workspace, databricks runtime", + ctx: dbr.MockRuntime(testContext, true), + mutateBundle: func(b *bundle.Bundle) { + b.Config.Bundle.Mode = config.Development + }, + initialValue: nil, + expectedValue: &enabled, + }, + { + name: "preset enabled, workspace.file_path is defined by user", + ctx: dbr.MockRuntime(testContext, true), + mutateBundle: func(b *bundle.Bundle) { + b.Config.Workspace.FilePath = "file_path" + }, + initialValue: &enabled, + expectedValue: &enabled, + expectedWarning: "workspace.file_path setting will be ignored in source-linked deployment mode", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b := &bundle.Bundle{ + SyncRootPath: workspacePath, + Config: config.Root{ + Presets: config.Presets{ + SourceLinkedDeployment: tt.initialValue, + }, + }, + } + + if tt.mutateBundle != nil { + tt.mutateBundle(b) + } + + bundletest.SetLocation(b, "presets.source_linked_deployment", []dyn.Location{{File: "databricks.yml"}}) + bundletest.SetLocation(b, "workspace.file_path", 
[]dyn.Location{{File: "databricks.yml"}}) + + diags := bundle.Apply(tt.ctx, b, mutator.ApplySourceLinkedDeploymentPreset()) + if diags.HasError() { + t.Fatalf("unexpected error: %v", diags) + } + + if tt.expectedWarning != "" { + require.Equal(t, tt.expectedWarning, diags[0].Summary) + require.NotEmpty(t, diags[0].Locations) + } + + require.Equal(t, tt.expectedValue, b.Config.Presets.SourceLinkedDeployment) + }) + } +} diff --git a/bundle/config/mutator/compute_id_compat.go b/bundle/config/mutator/compute_id_compat.go index 3afe02e9e..8f1ff5868 100644 --- a/bundle/config/mutator/compute_id_compat.go +++ b/bundle/config/mutator/compute_id_compat.go @@ -42,7 +42,6 @@ func rewriteComputeIdToClusterId(v dyn.Value, p dyn.Path) (dyn.Value, diag.Diagn var diags diag.Diagnostics computeIdPath := p.Append(dyn.Key("compute_id")) computeId, err := dyn.GetByPath(v, computeIdPath) - // If the "compute_id" key is not set, we don't need to do anything. if err != nil { return v, nil diff --git a/bundle/config/mutator/configure_dashboard_defaults_test.go b/bundle/config/mutator/configure_dashboard_defaults_test.go index 2234f9a73..9794d355c 100644 --- a/bundle/config/mutator/configure_dashboard_defaults_test.go +++ b/bundle/config/mutator/configure_dashboard_defaults_test.go @@ -109,19 +109,19 @@ func TestConfigureDashboardDefaultsEmbedCredentials(t *testing.T) { // Set to true; still true. v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d1.embed_credentials") if assert.NoError(t, err) { - assert.Equal(t, true, v.MustBool()) + assert.True(t, v.MustBool()) } // Set to false; still false. v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d2.embed_credentials") if assert.NoError(t, err) { - assert.Equal(t, false, v.MustBool()) + assert.False(t, v.MustBool()) } // Not set; now false. v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d3.embed_credentials") if assert.NoError(t, err) { - assert.Equal(t, false, v.MustBool()) + assert.False(t, v.MustBool()) } // No valid dashboard; no change. 
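Editor's note: a minimal databricks.yml sketch illustrating the apply_source_linked_deployment_preset.go mutator added above (the target name, user, and paths are illustrative, not taken from this diff). With the preset explicitly enabled and workspace.file_path also set, the mutator emits the "workspace.file_path setting will be ignored in source-linked deployment mode" warning; if the bundle is not synced under /Workspace/ on the Databricks Runtime, it instead warns that source-linked deployment is only available in the Databricks Workspace and flips the preset back to false. In development mode on the Runtime under /Workspace/, the preset is enabled implicitly even when left unset.

    bundle:
      name: example_bundle                 # illustrative name

    targets:
      dev:
        mode: development                  # dev mode + DBR + /Workspace/ sync root enables the preset implicitly
        presets:
          source_linked_deployment: true   # explicit opt-in; warned about and disabled outside the Workspace
        workspace:
          file_path: /Workspace/Users/someone@example.com/files   # ignored in source-linked mode (warning emitted)
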
diff --git a/bundle/config/mutator/default_queueing_test.go b/bundle/config/mutator/default_queueing_test.go index d3621663b..4c521812e 100644 --- a/bundle/config/mutator/default_queueing_test.go +++ b/bundle/config/mutator/default_queueing_test.go @@ -28,8 +28,8 @@ func TestDefaultQueueingApplyNoJobs(t *testing.T) { }, } d := bundle.Apply(context.Background(), b, DefaultQueueing()) - assert.Len(t, d, 0) - assert.Len(t, b.Config.Resources.Jobs, 0) + assert.Empty(t, d) + assert.Empty(t, b.Config.Resources.Jobs) } func TestDefaultQueueingApplyJobsAlreadyEnabled(t *testing.T) { @@ -47,7 +47,7 @@ func TestDefaultQueueingApplyJobsAlreadyEnabled(t *testing.T) { }, } d := bundle.Apply(context.Background(), b, DefaultQueueing()) - assert.Len(t, d, 0) + assert.Empty(t, d) assert.True(t, b.Config.Resources.Jobs["job"].Queue.Enabled) } @@ -66,7 +66,7 @@ func TestDefaultQueueingApplyEnableQueueing(t *testing.T) { }, } d := bundle.Apply(context.Background(), b, DefaultQueueing()) - assert.Len(t, d, 0) + assert.Empty(t, d) assert.NotNil(t, b.Config.Resources.Jobs["job"].Queue) assert.True(t, b.Config.Resources.Jobs["job"].Queue.Enabled) } @@ -96,7 +96,7 @@ func TestDefaultQueueingApplyWithMultipleJobs(t *testing.T) { }, } d := bundle.Apply(context.Background(), b, DefaultQueueing()) - assert.Len(t, d, 0) + assert.Empty(t, d) assert.False(t, b.Config.Resources.Jobs["job1"].Queue.Enabled) assert.True(t, b.Config.Resources.Jobs["job2"].Queue.Enabled) assert.True(t, b.Config.Resources.Jobs["job3"].Queue.Enabled) diff --git a/bundle/config/mutator/environments_compat_test.go b/bundle/config/mutator/environments_compat_test.go index 8a2129847..11facf9fb 100644 --- a/bundle/config/mutator/environments_compat_test.go +++ b/bundle/config/mutator/environments_compat_test.go @@ -44,7 +44,7 @@ func TestEnvironmentsToTargetsWithEnvironmentsDefined(t *testing.T) { diags := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets()) require.NoError(t, diags.Error()) - assert.Len(t, b.Config.Environments, 0) + assert.Empty(t, b.Config.Environments) assert.Len(t, b.Config.Targets, 1) } @@ -61,6 +61,6 @@ func TestEnvironmentsToTargetsWithTargetsDefined(t *testing.T) { diags := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets()) require.NoError(t, diags.Error()) - assert.Len(t, b.Config.Environments, 0) + assert.Empty(t, b.Config.Environments) assert.Len(t, b.Config.Targets, 1) } diff --git a/bundle/config/mutator/expand_pipeline_glob_paths_test.go b/bundle/config/mutator/expand_pipeline_glob_paths_test.go index 9f70b74ae..7cf3c9f3e 100644 --- a/bundle/config/mutator/expand_pipeline_glob_paths_test.go +++ b/bundle/config/mutator/expand_pipeline_glob_paths_test.go @@ -17,7 +17,7 @@ import ( ) func touchEmptyFile(t *testing.T, path string) { - err := os.MkdirAll(filepath.Dir(path), 0700) + err := os.MkdirAll(filepath.Dir(path), 0o700) require.NoError(t, err) f, err := os.Create(path) require.NoError(t, err) diff --git a/bundle/config/mutator/expand_workspace_root.go b/bundle/config/mutator/expand_workspace_root.go index 3f0547de1..2ec70548f 100644 --- a/bundle/config/mutator/expand_workspace_root.go +++ b/bundle/config/mutator/expand_workspace_root.go @@ -2,7 +2,6 @@ package mutator import ( "context" - "fmt" "path" "strings" @@ -28,12 +27,12 @@ func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) diag. 
} currentUser := b.Config.Workspace.CurrentUser - if currentUser == nil || currentUser.UserName == "" { + if currentUser == nil || currentUser.User == nil || currentUser.UserName == "" { return diag.Errorf("unable to expand workspace root: current user not set") } if strings.HasPrefix(root, "~/") { - home := fmt.Sprintf("/Workspace/Users/%s", currentUser.UserName) + home := "/Workspace/Users/" + currentUser.UserName b.Config.Workspace.RootPath = path.Join(home, root[2:]) } diff --git a/bundle/config/mutator/initialize_urls.go b/bundle/config/mutator/initialize_urls.go index 319305912..35ff53d0b 100644 --- a/bundle/config/mutator/initialize_urls.go +++ b/bundle/config/mutator/initialize_urls.go @@ -10,8 +10,7 @@ import ( "github.com/databricks/cli/libs/diag" ) -type initializeURLs struct { -} +type initializeURLs struct{} // InitializeURLs makes sure the URL field of each resource is configured. // NOTE: since this depends on an extra API call, this mutator adds some extra @@ -32,11 +31,14 @@ func (m *initializeURLs) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn } orgId := strconv.FormatInt(workspaceId, 10) host := b.WorkspaceClient().Config.CanonicalHostName() - initializeForWorkspace(b, orgId, host) + err = initializeForWorkspace(b, orgId, host) + if err != nil { + return diag.FromErr(err) + } return nil } -func initializeForWorkspace(b *bundle.Bundle, orgId string, host string) error { +func initializeForWorkspace(b *bundle.Bundle, orgId, host string) error { baseURL, err := url.Parse(host) if err != nil { return err diff --git a/bundle/config/mutator/initialize_urls_test.go b/bundle/config/mutator/initialize_urls_test.go index ec4e790c4..f07a7deb3 100644 --- a/bundle/config/mutator/initialize_urls_test.go +++ b/bundle/config/mutator/initialize_urls_test.go @@ -110,7 +110,8 @@ func TestInitializeURLs(t *testing.T) { "dashboard1": "https://mycompany.databricks.com/dashboardsv3/01ef8d56871e1d50ae30ce7375e42478/published?o=123456", } - initializeForWorkspace(b, "123456", "https://mycompany.databricks.com/") + err := initializeForWorkspace(b, "123456", "https://mycompany.databricks.com/") + require.NoError(t, err) for _, group := range b.Config.Resources.AllResources() { for key, r := range group.Resources { @@ -133,7 +134,8 @@ func TestInitializeURLsWithoutOrgId(t *testing.T) { }, } - initializeForWorkspace(b, "123456", "https://adb-123456.azuredatabricks.net/") + err := initializeForWorkspace(b, "123456", "https://adb-123456.azuredatabricks.net/") + require.NoError(t, err) require.Equal(t, "https://adb-123456.azuredatabricks.net/jobs/1", b.Config.Resources.Jobs["job1"].URL) } diff --git a/bundle/config/mutator/load_git_details.go b/bundle/config/mutator/load_git_details.go index 82255552a..5c263ac03 100644 --- a/bundle/config/mutator/load_git_details.go +++ b/bundle/config/mutator/load_git_details.go @@ -2,6 +2,8 @@ package mutator import ( "context" + "errors" + "os" "path/filepath" "github.com/databricks/cli/bundle" @@ -24,7 +26,9 @@ func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn var diags diag.Diagnostics info, err := git.FetchRepositoryInfo(ctx, b.BundleRoot.Native(), b.WorkspaceClient()) if err != nil { - diags = append(diags, diag.WarningFromErr(err)...) + if !errors.Is(err, os.ErrNotExist) { + diags = append(diags, diag.WarningFromErr(err)...) 
+ } } if info.WorktreeRoot == "" { diff --git a/bundle/config/mutator/merge_job_tasks_test.go b/bundle/config/mutator/merge_job_tasks_test.go index a9dae1e10..e6675eefb 100644 --- a/bundle/config/mutator/merge_job_tasks_test.go +++ b/bundle/config/mutator/merge_job_tasks_test.go @@ -74,8 +74,8 @@ func TestMergeJobTasks(t *testing.T) { assert.Equal(t, "i3.2xlarge", cluster.NodeTypeId) assert.Equal(t, 4, cluster.NumWorkers) assert.Len(t, task0.Libraries, 2) - assert.Equal(t, task0.Libraries[0].Whl, "package1") - assert.Equal(t, task0.Libraries[1].Pypi.Package, "package2") + assert.Equal(t, "package1", task0.Libraries[0].Whl) + assert.Equal(t, "package2", task0.Libraries[1].Pypi.Package) // This task was left untouched. task1 := j.Tasks[1].NewCluster diff --git a/bundle/config/mutator/override_compute.go b/bundle/config/mutator/override_compute.go index 5700cdf26..343303402 100644 --- a/bundle/config/mutator/override_compute.go +++ b/bundle/config/mutator/override_compute.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/env" ) @@ -22,7 +23,7 @@ func (m *overrideCompute) Name() string { func overrideJobCompute(j *resources.Job, compute string) { for i := range j.Tasks { - var task = &j.Tasks[i] + task := &j.Tasks[i] if task.ForEachTask != nil { task = &task.ForEachTask.Task @@ -38,18 +39,32 @@ func overrideJobCompute(j *resources.Job, compute string) { } func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - if b.Config.Bundle.Mode != config.Development { + var diags diag.Diagnostics + + if b.Config.Bundle.Mode == config.Production { if b.Config.Bundle.ClusterId != "" { - return diag.Errorf("cannot override compute for an target that does not use 'mode: development'") + // Overriding compute via a command-line flag for production works, but is not recommended. + diags = diags.Extend(diag.Diagnostics{{ + Summary: "Setting a cluster override for a target that uses 'mode: production' is not recommended", + Detail: "It is recommended to always use the same compute for production target for consistency.", + Severity: diag.Warning, + }}) } - return nil } if v := env.Get(ctx, "DATABRICKS_CLUSTER_ID"); v != "" { + // For historical reasons, we allow setting the cluster ID via the DATABRICKS_CLUSTER_ID + // when development mode is used. Sometimes, this is done by accident, so we log an info message. + if b.Config.Bundle.Mode == config.Development { + cmdio.LogString(ctx, "Setting a cluster override because DATABRICKS_CLUSTER_ID is set. It is recommended to use --cluster-id instead, which works in any target mode.") + } else { + // We don't allow using DATABRICKS_CLUSTER_ID in any other mode, it's too error-prone. 
+ return diag.Warningf("The DATABRICKS_CLUSTER_ID variable is set but is ignored since the current target does not use 'mode: development'") + } b.Config.Bundle.ClusterId = v } if b.Config.Bundle.ClusterId == "" { - return nil + return diags } r := b.Config.Resources @@ -57,5 +72,5 @@ func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) diag.Diag overrideJobCompute(r.Jobs[i], b.Config.Bundle.ClusterId) } - return nil + return diags } diff --git a/bundle/config/mutator/override_compute_test.go b/bundle/config/mutator/override_compute_test.go index 369447d7e..1fdeb373c 100644 --- a/bundle/config/mutator/override_compute_test.go +++ b/bundle/config/mutator/override_compute_test.go @@ -8,13 +8,14 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/diag" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestOverrideDevelopment(t *testing.T) { +func TestOverrideComputeModeDevelopment(t *testing.T) { t.Setenv("DATABRICKS_CLUSTER_ID", "") b := &bundle.Bundle{ Config: config.Root{ @@ -62,10 +63,13 @@ func TestOverrideDevelopment(t *testing.T) { assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[3].JobClusterKey) } -func TestOverrideDevelopmentEnv(t *testing.T) { +func TestOverrideComputeModeDefaultIgnoresVariable(t *testing.T) { t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId") b := &bundle.Bundle{ Config: config.Root{ + Bundle: config.Bundle{ + Mode: "", + }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job1": {JobSettings: &jobs.JobSettings{ @@ -86,11 +90,12 @@ func TestOverrideDevelopmentEnv(t *testing.T) { m := mutator.OverrideCompute() diags := bundle.Apply(context.Background(), b, m) - require.NoError(t, diags.Error()) + require.Len(t, diags, 1) + assert.Equal(t, "The DATABRICKS_CLUSTER_ID variable is set but is ignored since the current target does not use 'mode: development'", diags[0].Summary) assert.Equal(t, "cluster2", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId) } -func TestOverridePipelineTask(t *testing.T) { +func TestOverrideComputePipelineTask(t *testing.T) { t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId") b := &bundle.Bundle{ Config: config.Root{ @@ -115,7 +120,7 @@ func TestOverridePipelineTask(t *testing.T) { assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId) } -func TestOverrideForEachTask(t *testing.T) { +func TestOverrideComputeForEachTask(t *testing.T) { t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId") b := &bundle.Bundle{ Config: config.Root{ @@ -140,10 +145,11 @@ func TestOverrideForEachTask(t *testing.T) { assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ForEachTask.Task) } -func TestOverrideProduction(t *testing.T) { +func TestOverrideComputeModeProduction(t *testing.T) { b := &bundle.Bundle{ Config: config.Root{ Bundle: config.Bundle{ + Mode: config.Production, ClusterId: "newClusterID", }, Resources: config.Resources{ @@ -166,13 +172,19 @@ func TestOverrideProduction(t *testing.T) { m := mutator.OverrideCompute() diags := bundle.Apply(context.Background(), b, m) - require.True(t, diags.HasError()) + require.Len(t, diags, 1) + assert.Equal(t, "Setting a cluster override for a target that uses 'mode: production' is not recommended", diags[0].Summary) + assert.Equal(t, diag.Warning, diags[0].Severity) + 
assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId) } -func TestOverrideProductionEnv(t *testing.T) { +func TestOverrideComputeModeProductionIgnoresVariable(t *testing.T) { t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId") b := &bundle.Bundle{ Config: config.Root{ + Bundle: config.Bundle{ + Mode: config.Production, + }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job1": {JobSettings: &jobs.JobSettings{ @@ -193,5 +205,7 @@ func TestOverrideProductionEnv(t *testing.T) { m := mutator.OverrideCompute() diags := bundle.Apply(context.Background(), b, m) - require.NoError(t, diags.Error()) + require.Len(t, diags, 1) + assert.Equal(t, "The DATABRICKS_CLUSTER_ID variable is set but is ignored since the current target does not use 'mode: development'", diags[0].Summary) + assert.Equal(t, "cluster2", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId) } diff --git a/bundle/config/mutator/paths/job_paths_visitor.go b/bundle/config/mutator/paths/job_paths_visitor.go index 275a8fa53..1d713aaf5 100644 --- a/bundle/config/mutator/paths/job_paths_visitor.go +++ b/bundle/config/mutator/paths/job_paths_visitor.go @@ -95,7 +95,7 @@ func jobRewritePatterns() []jobRewritePattern { // VisitJobPaths visits all paths in job resources and applies a function to each path. func VisitJobPaths(value dyn.Value, fn VisitFunc) (dyn.Value, error) { var err error - var newValue = value + newValue := value for _, rewritePattern := range jobRewritePatterns() { newValue, err = dyn.MapByPattern(newValue, rewritePattern.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { @@ -105,7 +105,6 @@ func VisitJobPaths(value dyn.Value, fn VisitFunc) (dyn.Value, error) { return fn(p, rewritePattern.kind, v) }) - if err != nil { return dyn.InvalidValue, err } diff --git a/bundle/config/mutator/prepend_workspace_prefix.go b/bundle/config/mutator/prepend_workspace_prefix.go index e0be2572d..616759ee4 100644 --- a/bundle/config/mutator/prepend_workspace_prefix.go +++ b/bundle/config/mutator/prepend_workspace_prefix.go @@ -55,16 +55,14 @@ func (m *prependWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) di } } - return dyn.NewValue(fmt.Sprintf("/Workspace%s", path), v.Locations()), nil + return dyn.NewValue("/Workspace"+path, v.Locations()), nil }) - if err != nil { return dyn.InvalidValue, err } } return v, nil }) - if err != nil { return diag.FromErr(err) } diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go index 920c5b5c3..cc07704bf 100644 --- a/bundle/config/mutator/process_target_mode.go +++ b/bundle/config/mutator/process_target_mode.go @@ -7,7 +7,6 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/libs/dbr" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/iamutil" @@ -59,14 +58,6 @@ func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) { t.TriggerPauseStatus = config.Paused } - if !config.IsExplicitlyDisabled(t.SourceLinkedDeployment) { - isInWorkspace := strings.HasPrefix(b.SyncRootPath, "/Workspace/") - if isInWorkspace && dbr.RunsOnRuntime(ctx) { - enabled := true - t.SourceLinkedDeployment = &enabled - } - } - if !config.IsExplicitlyDisabled(t.PipelinesDevelopment) { enabled := true t.PipelinesDevelopment = &enabled diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index 5f38142ed..6df88d067 
100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -3,14 +3,12 @@ package mutator import ( "context" "reflect" - "runtime" "slices" "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" - "github.com/databricks/cli/libs/dbr" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/tags" "github.com/databricks/cli/libs/vfs" @@ -163,18 +161,18 @@ func TestProcessTargetModeDevelopment(t *testing.T) { // Job 1 assert.Equal(t, "[dev lennart] job1", b.Config.Resources.Jobs["job1"].Name) - assert.Equal(t, b.Config.Resources.Jobs["job1"].Tags["existing"], "tag") - assert.Equal(t, b.Config.Resources.Jobs["job1"].Tags["dev"], "lennart") - assert.Equal(t, b.Config.Resources.Jobs["job1"].Schedule.PauseStatus, jobs.PauseStatusPaused) + assert.Equal(t, "tag", b.Config.Resources.Jobs["job1"].Tags["existing"]) + assert.Equal(t, "lennart", b.Config.Resources.Jobs["job1"].Tags["dev"]) + assert.Equal(t, jobs.PauseStatusPaused, b.Config.Resources.Jobs["job1"].Schedule.PauseStatus) // Job 2 assert.Equal(t, "[dev lennart] job2", b.Config.Resources.Jobs["job2"].Name) - assert.Equal(t, b.Config.Resources.Jobs["job2"].Tags["dev"], "lennart") - assert.Equal(t, b.Config.Resources.Jobs["job2"].Schedule.PauseStatus, jobs.PauseStatusUnpaused) + assert.Equal(t, "lennart", b.Config.Resources.Jobs["job2"].Tags["dev"]) + assert.Equal(t, jobs.PauseStatusUnpaused, b.Config.Resources.Jobs["job2"].Schedule.PauseStatus) // Pipeline 1 assert.Equal(t, "[dev lennart] pipeline1", b.Config.Resources.Pipelines["pipeline1"].Name) - assert.Equal(t, false, b.Config.Resources.Pipelines["pipeline1"].Continuous) + assert.False(t, b.Config.Resources.Pipelines["pipeline1"].Continuous) assert.True(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) // Experiment 1 @@ -399,7 +397,7 @@ func TestAllResourcesMocked(t *testing.T) { b := mockBundle(config.Development) resources := reflect.ValueOf(b.Config.Resources) - for i := 0; i < resources.NumField(); i++ { + for i := range resources.NumField() { field := resources.Field(i) if field.Kind() == reflect.Map { assert.True( @@ -428,7 +426,7 @@ func TestAllNonUcResourcesAreRenamed(t *testing.T) { require.NoError(t, diags.Error()) resources := reflect.ValueOf(b.Config.Resources) - for i := 0; i < resources.NumField(); i++ { + for i := range resources.NumField() { field := resources.Field(i) if field.Kind() == reflect.Map { @@ -557,32 +555,3 @@ func TestPipelinesDevelopmentDisabled(t *testing.T) { assert.False(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) } - -func TestSourceLinkedDeploymentEnabled(t *testing.T) { - b, diags := processSourceLinkedBundle(t, true) - require.NoError(t, diags.Error()) - assert.True(t, *b.Config.Presets.SourceLinkedDeployment) -} - -func TestSourceLinkedDeploymentDisabled(t *testing.T) { - b, diags := processSourceLinkedBundle(t, false) - require.NoError(t, diags.Error()) - assert.False(t, *b.Config.Presets.SourceLinkedDeployment) -} - -func processSourceLinkedBundle(t *testing.T, presetEnabled bool) (*bundle.Bundle, diag.Diagnostics) { - if runtime.GOOS == "windows" { - t.Skip("this test is not applicable on Windows because source-linked mode works only in the Databricks Workspace") - } - - b := mockBundle(config.Development) - - workspacePath := "/Workspace/lennart@company.com/" - b.SyncRootPath = workspacePath - b.Config.Presets.SourceLinkedDeployment = 
&presetEnabled - - ctx := dbr.MockRuntime(context.Background(), true) - m := bundle.Seq(ProcessTargetMode(), ApplyPresets()) - diags := bundle.Apply(ctx, b, m) - return b, diags -} diff --git a/bundle/config/mutator/python/python_diagnostics_test.go b/bundle/config/mutator/python/python_diagnostics_test.go index b73b0f73c..fd6def8da 100644 --- a/bundle/config/mutator/python/python_diagnostics_test.go +++ b/bundle/config/mutator/python/python_diagnostics_test.go @@ -30,7 +30,6 @@ type parsePythonDiagnosticsTest struct { } func TestParsePythonDiagnostics(t *testing.T) { - testCases := []parsePythonDiagnosticsTest{ { name: "short error with location", diff --git a/bundle/config/mutator/python/python_mutator.go b/bundle/config/mutator/python/python_mutator.go index da6c4d210..8009ab243 100644 --- a/bundle/config/mutator/python/python_mutator.go +++ b/bundle/config/mutator/python/python_mutator.go @@ -9,12 +9,12 @@ import ( "io" "os" "path/filepath" + "reflect" + "strings" "github.com/databricks/databricks-sdk-go/logger" "github.com/fatih/color" - "strings" - "github.com/databricks/cli/libs/python" "github.com/databricks/cli/bundle/env" @@ -41,6 +41,8 @@ const ( // We also open for possibility of appending other sections of bundle configuration, // for example, adding new variables. However, this is not supported yet, and CLI rejects // such changes. + // + // Deprecated, left for backward-compatibility with PyDABs. PythonMutatorPhaseLoad phase = "load" // PythonMutatorPhaseInit is the phase after bundle configuration was loaded, and @@ -60,7 +62,46 @@ const ( // PyDABs can output YAML containing references to variables, and CLI should resolve them. // // Existing resources can't be removed, and CLI rejects such changes. + // + // Deprecated, left for backward-compatibility with PyDABs. PythonMutatorPhaseInit phase = "init" + + // PythonMutatorPhaseLoadResources is the phase in which YAML configuration was loaded. + // + // At this stage, we execute Python code to load resources defined in Python. + // + // During this process, Python code can access: + // - selected deployment target + // - bundle variable values + // - variables provided through CLI argument or environment variables + // + // The following is not available: + // - variables referencing other variables are in unresolved format + // + // Python code can output YAML referencing variables, and CLI should resolve them. + // + // Existing resources can't be removed or modified, and CLI rejects such changes. + // While it's called 'load_resources', this phase is executed in 'init' phase of mutator pipeline. + PythonMutatorPhaseLoadResources phase = "load_resources" + + // PythonMutatorPhaseApplyMutators is the phase in which resources defined in YAML or Python + // are already loaded. + // + // At this stage, we execute Python code to mutate resources defined in YAML or Python. + // + // During this process, Python code can access: + // - selected deployment target + // - bundle variable values + // - variables provided through CLI argument or environment variables + // + // The following is not available: + // - variables referencing other variables are in unresolved format + // + // Python code can output YAML referencing variables, and CLI should resolve them. + // + // Resources can't be added or removed, and CLI rejects such changes. Python code is + // allowed to modify existing resources, but not other parts of bundle configuration. 
+ PythonMutatorPhaseApplyMutators phase = "apply_mutators" ) type pythonMutator struct { @@ -77,28 +118,73 @@ func (m *pythonMutator) Name() string { return fmt.Sprintf("PythonMutator(%s)", m.phase) } -func getExperimental(b *bundle.Bundle) config.Experimental { - if b.Config.Experimental == nil { - return config.Experimental{} +// opts is a common structure for deprecated PyDABs and upcoming Python +// configuration sections +type opts struct { + enabled bool + + venvPath string +} + +// getOpts adapts deprecated PyDABs and upcoming Python configuration +// into a common structure. +func getOpts(b *bundle.Bundle, phase phase) (opts, error) { + experimental := b.Config.Experimental + if experimental == nil { + return opts{}, nil } - return *b.Config.Experimental + // using reflect.DeepEquals in case we add more fields + pydabsEnabled := !reflect.DeepEqual(experimental.PyDABs, config.PyDABs{}) + pythonEnabled := !reflect.DeepEqual(experimental.Python, config.Python{}) + + if pydabsEnabled && pythonEnabled { + return opts{}, errors.New("both experimental/pydabs and experimental/python are enabled, only one can be enabled") + } else if pydabsEnabled { + if !experimental.PyDABs.Enabled { + return opts{}, nil + } + + // don't execute for phases for 'python' section + if phase == PythonMutatorPhaseInit || phase == PythonMutatorPhaseLoad { + return opts{ + enabled: true, + venvPath: experimental.PyDABs.VEnvPath, + }, nil + } else { + return opts{}, nil + } + } else if pythonEnabled { + // don't execute for phases for 'pydabs' section + if phase == PythonMutatorPhaseLoadResources || phase == PythonMutatorPhaseApplyMutators { + return opts{ + enabled: true, + venvPath: experimental.Python.VEnvPath, + }, nil + } else { + return opts{}, nil + } + } else { + return opts{}, nil + } } func (m *pythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - experimental := getExperimental(b) + opts, err := getOpts(b, m.phase) + if err != nil { + return diag.Errorf("failed to apply python mutator: %s", err) + } - if !experimental.PyDABs.Enabled { + if !opts.enabled { return nil } // mutateDiags is used because Mutate returns 'error' instead of 'diag.Diagnostics' var mutateDiags diag.Diagnostics - var mutateDiagsHasError = errors.New("unexpected error") - - err := b.Config.Mutate(func(leftRoot dyn.Value) (dyn.Value, error) { - pythonPath, err := detectExecutable(ctx, experimental.PyDABs.VEnvPath) + mutateDiagsHasError := errors.New("unexpected error") + err = b.Config.Mutate(func(leftRoot dyn.Value) (dyn.Value, error) { + pythonPath, err := detectExecutable(ctx, opts.venvPath) if err != nil { return dyn.InvalidValue, fmt.Errorf("failed to get Python interpreter path: %w", err) } @@ -139,9 +225,9 @@ func createCacheDir(ctx context.Context) (string, error) { // support the same env variable as in b.CacheDir if tempDir, exists := env.TempDir(ctx); exists { // use 'default' as target name - cacheDir := filepath.Join(tempDir, "default", "pydabs") + cacheDir := filepath.Join(tempDir, "default", "python") - err := os.MkdirAll(cacheDir, 0700) + err := os.MkdirAll(cacheDir, 0o700) if err != nil { return "", err } @@ -149,10 +235,10 @@ func createCacheDir(ctx context.Context) (string, error) { return cacheDir, nil } - return os.MkdirTemp("", "-pydabs") + return os.MkdirTemp("", "-python") } -func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, rootPath string, pythonPath string, root dyn.Value) (dyn.Value, diag.Diagnostics) { +func (m *pythonMutator) runPythonMutator(ctx 
context.Context, cacheDir, rootPath, pythonPath string, root dyn.Value) (dyn.Value, diag.Diagnostics) { inputPath := filepath.Join(cacheDir, "input.json") outputPath := filepath.Join(cacheDir, "output.json") diagnosticsPath := filepath.Join(cacheDir, "diagnostics.json") @@ -205,7 +291,7 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, r } // process can fail without reporting errors in diagnostics file or creating it, for instance, - // venv doesn't have PyDABs library installed + // venv doesn't have 'databricks-bundles' library installed if processErr != nil { diagnostic := diag.Diagnostic{ Severity: diag.Error, @@ -228,16 +314,15 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, r return output, pythonDiagnostics } -const installExplanation = `If using Python wheels, ensure that 'databricks-pydabs' is included in the dependencies, -and that the wheel is installed in the Python environment: +const pythonInstallExplanation = `Ensure that 'databricks-bundles' is installed in Python environment: - $ .venv/bin/pip install -e . + $ .venv/bin/pip install databricks-bundles If using a virtual environment, ensure it is specified as the venv_path property in databricks.yml, or activate the environment before running CLI commands: experimental: - pydabs: + python: venv_path: .venv ` @@ -247,9 +332,9 @@ or activate the environment before running CLI commands: func explainProcessErr(stderr string) string { // implemented in cpython/Lib/runpy.py and portable across Python 3.x, including pypy if strings.Contains(stderr, "Error while finding module specification for 'databricks.bundles.build'") { - summary := color.CyanString("Explanation: ") + "'databricks-pydabs' library is not installed in the Python environment.\n" + summary := color.CyanString("Explanation: ") + "'databricks-bundles' library is not installed in the Python environment.\n" - return stderr + "\n" + summary + "\n" + installExplanation + return stderr + "\n" + summary + "\n" + pythonInstallExplanation } return stderr @@ -263,10 +348,10 @@ func writeInputFile(inputPath string, input dyn.Value) error { return fmt.Errorf("failed to marshal input: %w", err) } - return os.WriteFile(inputPath, rootConfigJson, 0600) + return os.WriteFile(inputPath, rootConfigJson, 0o600) } -func loadOutputFile(rootPath string, outputPath string) (dyn.Value, diag.Diagnostics) { +func loadOutputFile(rootPath, outputPath string) (dyn.Value, diag.Diagnostics) { outputFile, err := os.Open(outputPath) if err != nil { return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to open output file: %w", err)) @@ -279,10 +364,10 @@ func loadOutputFile(rootPath string, outputPath string) (dyn.Value, diag.Diagnos // // virtualPath has to stay in rootPath, because locations outside root path are not allowed: // - // Error: path /var/folders/.../pydabs/dist/*.whl is not contained in bundle root path + // Error: path /var/folders/.../python/dist/*.whl is not contained in bundle root path // // for that, we pass virtualPath instead of outputPath as file location - virtualPath, err := filepath.Abs(filepath.Join(rootPath, "__generated_by_pydabs__.yml")) + virtualPath, err := filepath.Abs(filepath.Join(rootPath, "__generated_by_python__.yml")) if err != nil { return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to get absolute path: %w", err)) } @@ -336,19 +421,23 @@ func loadDiagnosticsFile(path string) (diag.Diagnostics, error) { func createOverrideVisitor(ctx context.Context, phase phase) 
(merge.OverrideVisitor, error) { switch phase { case PythonMutatorPhaseLoad: - return createLoadOverrideVisitor(ctx), nil + return createLoadResourcesOverrideVisitor(ctx), nil case PythonMutatorPhaseInit: - return createInitOverrideVisitor(ctx), nil + return createInitOverrideVisitor(ctx, insertResourceModeAllow), nil + case PythonMutatorPhaseLoadResources: + return createLoadResourcesOverrideVisitor(ctx), nil + case PythonMutatorPhaseApplyMutators: + return createInitOverrideVisitor(ctx, insertResourceModeDisallow), nil default: return merge.OverrideVisitor{}, fmt.Errorf("unknown phase: %s", phase) } } -// createLoadOverrideVisitor creates an override visitor for the load phase. +// createLoadResourcesOverrideVisitor creates an override visitor for the load_resources phase. // -// During load, it's only possible to create new resources, and not modify or +// During load_resources, it's only possible to create new resources, and not modify or // delete existing ones. -func createLoadOverrideVisitor(ctx context.Context) merge.OverrideVisitor { +func createLoadResourcesOverrideVisitor(ctx context.Context) merge.OverrideVisitor { resourcesPath := dyn.NewPath(dyn.Key("resources")) jobsPath := dyn.NewPath(dyn.Key("resources"), dyn.Key("jobs")) @@ -381,17 +470,27 @@ func createLoadOverrideVisitor(ctx context.Context) merge.OverrideVisitor { return right, nil }, - VisitUpdate: func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) { + VisitUpdate: func(valuePath dyn.Path, left, right dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (update)", valuePath.String()) }, } } +// insertResourceMode controls whether createInitOverrideVisitor allows or disallows inserting new resources. +type insertResourceMode int + +const ( + insertResourceModeDisallow insertResourceMode = iota + insertResourceModeAllow insertResourceMode = iota +) + // createInitOverrideVisitor creates an override visitor for the init phase. // // During the init phase it's possible to create new resources, modify existing // resources, but not delete existing resources. 
-func createInitOverrideVisitor(ctx context.Context) merge.OverrideVisitor { +// +// If mode is insertResourceModeDisallow, it matching expected behaviour of apply_mutators +func createInitOverrideVisitor(ctx context.Context, mode insertResourceMode) merge.OverrideVisitor { resourcesPath := dyn.NewPath(dyn.Key("resources")) jobsPath := dyn.NewPath(dyn.Key("resources"), dyn.Key("jobs")) @@ -426,11 +525,16 @@ func createInitOverrideVisitor(ctx context.Context) merge.OverrideVisitor { return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (insert)", valuePath.String()) } + insertResource := len(valuePath) == len(jobsPath)+1 + if mode == insertResourceModeDisallow && insertResource { + return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (insert)", valuePath.String()) + } + log.Debugf(ctx, "Insert value at %q", valuePath.String()) return right, nil }, - VisitUpdate: func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) { + VisitUpdate: func(valuePath dyn.Path, left, right dyn.Value) (dyn.Value, error) { if !valuePath.HasPrefix(jobsPath) { return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (update)", valuePath.String()) } @@ -443,9 +547,9 @@ func createInitOverrideVisitor(ctx context.Context) merge.OverrideVisitor { } func isOmitemptyDelete(left dyn.Value) bool { - // PyDABs can omit empty sequences/mappings in output, because we don't track them as optional, + // Python output can omit empty sequences/mappings, because we don't track them as optional, // there is no semantic difference between empty and missing, so we keep them as they were before - // PyDABs deleted them. + // Python mutator deleted them. switch left.Kind() { case dyn.KindMap: diff --git a/bundle/config/mutator/python/python_mutator_test.go b/bundle/config/mutator/python/python_mutator_test.go index 7a419d799..d51572c8a 100644 --- a/bundle/config/mutator/python/python_mutator_test.go +++ b/bundle/config/mutator/python/python_mutator_test.go @@ -2,6 +2,7 @@ package python import ( "context" + "errors" "fmt" "os" "os/exec" @@ -39,13 +40,25 @@ func TestPythonMutator_Name_init(t *testing.T) { assert.Equal(t, "PythonMutator(init)", mutator.Name()) } -func TestPythonMutator_load(t *testing.T) { +func TestPythonMutator_Name_loadResources(t *testing.T) { + mutator := PythonMutator(PythonMutatorPhaseLoadResources) + + assert.Equal(t, "PythonMutator(load_resources)", mutator.Name()) +} + +func TestPythonMutator_Name_applyMutators(t *testing.T) { + mutator := PythonMutator(PythonMutatorPhaseApplyMutators) + + assert.Equal(t, "PythonMutator(apply_mutators)", mutator.Name()) +} + +func TestPythonMutator_loadResources(t *testing.T) { withFakeVEnv(t, ".venv") b := loadYaml("databricks.yml", ` experimental: - pydabs: - enabled: true + python: + resources: ["resources:load_resources"] venv_path: .venv resources: jobs: @@ -59,12 +72,12 @@ func TestPythonMutator_load(t *testing.T) { "-m", "databricks.bundles.build", "--phase", - "load", + "load_resources", }, `{ "experimental": { - "pydabs": { - "enabled": true, + "python": { + "resources": ["resources:load_resources"], "venv_path": ".venv" } }, @@ -82,7 +95,7 @@ func TestPythonMutator_load(t *testing.T) { `{"severity": "warning", "summary": "job doesn't have any tasks", "location": {"file": "src/examples/file.py", "line": 10, "column": 5}}`, ) - mutator := PythonMutator(PythonMutatorPhaseLoad) + mutator := PythonMutator(PythonMutatorPhaseLoadResources) diags := bundle.Apply(ctx, b, mutator) assert.NoError(t, diags.Error()) @@ -106,16 +119,14 @@ 
func TestPythonMutator_load(t *testing.T) { Column: 5, }, }, diags[0].Locations) - } -func TestPythonMutator_load_disallowed(t *testing.T) { +func TestPythonMutator_loadResources_disallowed(t *testing.T) { withFakeVEnv(t, ".venv") - b := loadYaml("databricks.yml", ` experimental: - pydabs: - enabled: true + python: + resources: ["resources:load_resources"] venv_path: .venv resources: jobs: @@ -129,12 +140,12 @@ func TestPythonMutator_load_disallowed(t *testing.T) { "-m", "databricks.bundles.build", "--phase", - "load", + "load_resources", }, `{ "experimental": { - "pydabs": { - "enabled": true, + "python": { + "resources": ["resources:load_resources"], "venv_path": ".venv" } }, @@ -148,20 +159,20 @@ func TestPythonMutator_load_disallowed(t *testing.T) { } }`, "") - mutator := PythonMutator(PythonMutatorPhaseLoad) + mutator := PythonMutator(PythonMutatorPhaseLoadResources) diag := bundle.Apply(ctx, b, mutator) assert.EqualError(t, diag.Error(), "unexpected change at \"resources.jobs.job0.description\" (insert)") } -func TestPythonMutator_init(t *testing.T) { +func TestPythonMutator_applyMutators(t *testing.T) { withFakeVEnv(t, ".venv") - b := loadYaml("databricks.yml", ` experimental: - pydabs: - enabled: true + python: venv_path: .venv + mutators: + - "mutators:add_description" resources: jobs: job0: @@ -174,13 +185,13 @@ func TestPythonMutator_init(t *testing.T) { "-m", "databricks.bundles.build", "--phase", - "init", + "apply_mutators", }, `{ "experimental": { - "pydabs": { - "enabled": true, - "venv_path": ".venv" + "python": { + "venv_path": ".venv", + "mutators": ["mutators:add_description"] } }, "resources": { @@ -193,7 +204,7 @@ func TestPythonMutator_init(t *testing.T) { } }`, "") - mutator := PythonMutator(PythonMutatorPhaseInit) + mutator := PythonMutator(PythonMutatorPhaseApplyMutators) diag := bundle.Apply(ctx, b, mutator) assert.NoError(t, diag.Error()) @@ -208,12 +219,12 @@ func TestPythonMutator_init(t *testing.T) { require.NoError(t, err) assert.Equal(t, "databricks.yml", name.Location().File) - // 'description' was updated by PyDABs and has location of generated file until + // 'description' was updated by Python code and has location of generated file until // we implement source maps description, err := dyn.GetByPath(v, dyn.MustPathFromString("resources.jobs.job0.description")) require.NoError(t, err) - expectedVirtualPath, err := filepath.Abs("__generated_by_pydabs__.yml") + expectedVirtualPath, err := filepath.Abs("__generated_by_python__.yml") require.NoError(t, err) assert.Equal(t, expectedVirtualPath, description.Location().File) @@ -224,12 +235,12 @@ func TestPythonMutator_init(t *testing.T) { func TestPythonMutator_badOutput(t *testing.T) { withFakeVEnv(t, ".venv") - b := loadYaml("databricks.yml", ` experimental: - pydabs: - enabled: true + python: venv_path: .venv + resources: + - "resources:load_resources" resources: jobs: job0: @@ -242,7 +253,7 @@ func TestPythonMutator_badOutput(t *testing.T) { "-m", "databricks.bundles.build", "--phase", - "load", + "load_resources", }, `{ "resources": { @@ -254,7 +265,7 @@ func TestPythonMutator_badOutput(t *testing.T) { } }`, "") - mutator := PythonMutator(PythonMutatorPhaseLoad) + mutator := PythonMutator(PythonMutatorPhaseLoadResources) diag := bundle.Apply(ctx, b, mutator) assert.EqualError(t, diag.Error(), "unknown field: unknown_property") @@ -270,34 +281,63 @@ func TestPythonMutator_disabled(t *testing.T) { assert.NoError(t, diag.Error()) } -func TestPythonMutator_venvRequired(t *testing.T) { - b := 
loadYaml("databricks.yml", ` - experimental: - pydabs: - enabled: true`) - - ctx := context.Background() - mutator := PythonMutator(PythonMutatorPhaseLoad) - diag := bundle.Apply(ctx, b, mutator) - - assert.Error(t, diag.Error(), "\"experimental.enable_pydabs\" is enabled, but \"experimental.venv.path\" is not set") -} - func TestPythonMutator_venvNotFound(t *testing.T) { expectedError := fmt.Sprintf("failed to get Python interpreter path: can't find %q, check if virtualenv is created", interpreterPath("bad_path")) b := loadYaml("databricks.yml", ` experimental: - pydabs: - enabled: true - venv_path: bad_path`) + python: + venv_path: bad_path + resources: + - "resources:load_resources"`) - mutator := PythonMutator(PythonMutatorPhaseInit) + mutator := PythonMutator(PythonMutatorPhaseLoadResources) diag := bundle.Apply(context.Background(), b, mutator) assert.EqualError(t, diag.Error(), expectedError) } +func TestGetOps_Python(t *testing.T) { + actual, err := getOpts(&bundle.Bundle{ + Config: config.Root{ + Experimental: &config.Experimental{ + Python: config.Python{ + VEnvPath: ".venv", + Resources: []string{ + "resources:load_resources", + }, + }, + }, + }, + }, PythonMutatorPhaseLoadResources) + + assert.NoError(t, err) + assert.Equal(t, opts{venvPath: ".venv", enabled: true}, actual) +} + +func TestGetOps_PyDABs(t *testing.T) { + actual, err := getOpts(&bundle.Bundle{ + Config: config.Root{ + Experimental: &config.Experimental{ + PyDABs: config.PyDABs{ + VEnvPath: ".venv", + Enabled: true, + }, + }, + }, + }, PythonMutatorPhaseInit) + + assert.NoError(t, err) + assert.Equal(t, opts{venvPath: ".venv", enabled: true}, actual) +} + +func TestGetOps_empty(t *testing.T) { + actual, err := getOpts(&bundle.Bundle{}, PythonMutatorPhaseLoadResources) + + assert.NoError(t, err) + assert.Equal(t, opts{enabled: false}, actual) +} + type createOverrideVisitorTestCase struct { name string updatePath dyn.Path @@ -315,48 +355,48 @@ func TestCreateOverrideVisitor(t *testing.T) { testCases := []createOverrideVisitorTestCase{ { - name: "load: can't change an existing job", - phase: PythonMutatorPhaseLoad, + name: "load_resources: can't change an existing job", + phase: PythonMutatorPhaseLoadResources, updatePath: dyn.MustPathFromString("resources.jobs.job0.name"), deletePath: dyn.MustPathFromString("resources.jobs.job0.name"), insertPath: dyn.MustPathFromString("resources.jobs.job0.name"), - deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (delete)"), - insertError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (insert)"), - updateError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (update)"), + deleteError: errors.New("unexpected change at \"resources.jobs.job0.name\" (delete)"), + insertError: errors.New("unexpected change at \"resources.jobs.job0.name\" (insert)"), + updateError: errors.New("unexpected change at \"resources.jobs.job0.name\" (update)"), }, { - name: "load: can't delete an existing job", - phase: PythonMutatorPhaseLoad, + name: "load_resources: can't delete an existing job", + phase: PythonMutatorPhaseLoadResources, deletePath: dyn.MustPathFromString("resources.jobs.job0"), - deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0\" (delete)"), + deleteError: errors.New("unexpected change at \"resources.jobs.job0\" (delete)"), }, { - name: "load: can insert 'resources'", - phase: PythonMutatorPhaseLoad, + name: "load_resources: can insert 'resources'", + phase: PythonMutatorPhaseLoadResources, insertPath: 
dyn.MustPathFromString("resources"), insertError: nil, }, { - name: "load: can insert 'resources.jobs'", - phase: PythonMutatorPhaseLoad, + name: "load_resources: can insert 'resources.jobs'", + phase: PythonMutatorPhaseLoadResources, insertPath: dyn.MustPathFromString("resources.jobs"), insertError: nil, }, { - name: "load: can insert a job", - phase: PythonMutatorPhaseLoad, + name: "load_resources: can insert a job", + phase: PythonMutatorPhaseLoadResources, insertPath: dyn.MustPathFromString("resources.jobs.job0"), insertError: nil, }, { - name: "load: can't change include", - phase: PythonMutatorPhaseLoad, + name: "load_resources: can't change include", + phase: PythonMutatorPhaseLoadResources, deletePath: dyn.MustPathFromString("include[0]"), insertPath: dyn.MustPathFromString("include[0]"), updatePath: dyn.MustPathFromString("include[0]"), - deleteError: fmt.Errorf("unexpected change at \"include[0]\" (delete)"), - insertError: fmt.Errorf("unexpected change at \"include[0]\" (insert)"), - updateError: fmt.Errorf("unexpected change at \"include[0]\" (update)"), + deleteError: errors.New("unexpected change at \"include[0]\" (delete)"), + insertError: errors.New("unexpected change at \"include[0]\" (insert)"), + updateError: errors.New("unexpected change at \"include[0]\" (update)"), }, { name: "init: can change an existing job", @@ -372,7 +412,7 @@ func TestCreateOverrideVisitor(t *testing.T) { name: "init: can't delete an existing job", phase: PythonMutatorPhaseInit, deletePath: dyn.MustPathFromString("resources.jobs.job0"), - deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0\" (delete)"), + deleteError: errors.New("unexpected change at \"resources.jobs.job0\" (delete)"), }, { name: "init: can insert 'resources'", @@ -398,9 +438,43 @@ func TestCreateOverrideVisitor(t *testing.T) { deletePath: dyn.MustPathFromString("include[0]"), insertPath: dyn.MustPathFromString("include[0]"), updatePath: dyn.MustPathFromString("include[0]"), - deleteError: fmt.Errorf("unexpected change at \"include[0]\" (delete)"), - insertError: fmt.Errorf("unexpected change at \"include[0]\" (insert)"), - updateError: fmt.Errorf("unexpected change at \"include[0]\" (update)"), + deleteError: errors.New("unexpected change at \"include[0]\" (delete)"), + insertError: errors.New("unexpected change at \"include[0]\" (insert)"), + updateError: errors.New("unexpected change at \"include[0]\" (update)"), + }, + { + name: "apply_mutators: can't delete an existing job", + phase: PythonMutatorPhaseInit, + deletePath: dyn.MustPathFromString("resources.jobs.job0"), + deleteError: errors.New("unexpected change at \"resources.jobs.job0\" (delete)"), + }, + { + name: "apply_mutators: can insert 'resources'", + phase: PythonMutatorPhaseApplyMutators, + insertPath: dyn.MustPathFromString("resources"), + insertError: nil, + }, + { + name: "apply_mutators: can insert 'resources.jobs'", + phase: PythonMutatorPhaseApplyMutators, + insertPath: dyn.MustPathFromString("resources.jobs"), + insertError: nil, + }, + { + name: "apply_mutators: can't insert a job", + phase: PythonMutatorPhaseApplyMutators, + insertPath: dyn.MustPathFromString("resources.jobs.job0"), + insertError: errors.New("unexpected change at \"resources.jobs.job0\" (insert)"), + }, + { + name: "apply_mutators: can't change include", + phase: PythonMutatorPhaseApplyMutators, + deletePath: dyn.MustPathFromString("include[0]"), + insertPath: dyn.MustPathFromString("include[0]"), + updatePath: dyn.MustPathFromString("include[0]"), + deleteError: 
errors.New("unexpected change at \"include[0]\" (delete)"), + insertError: errors.New("unexpected change at \"include[0]\" (insert)"), + updateError: errors.New("unexpected change at \"include[0]\" (update)"), }, } @@ -459,9 +533,9 @@ type overrideVisitorOmitemptyTestCase struct { } func TestCreateOverrideVisitor_omitempty(t *testing.T) { - // PyDABs can omit empty sequences/mappings in output, because we don't track them as optional, + // Python output can omit empty sequences/mappings in output, because we don't track them as optional, // there is no semantic difference between empty and missing, so we keep them as they were before - // PyDABs deleted them. + // Python code deleted them. allPhases := []phase{PythonMutatorPhaseLoad, PythonMutatorPhaseInit} location := dyn.Location{ @@ -542,7 +616,7 @@ func TestLoadDiagnosticsFile_nonExistent(t *testing.T) { func TestInterpreterPath(t *testing.T) { if runtime.GOOS == "windows" { - assert.Equal(t, "venv\\Scripts\\python3.exe", interpreterPath("venv")) + assert.Equal(t, "venv\\Scripts\\python.exe", interpreterPath("venv")) } else { assert.Equal(t, "venv/bin/python3", interpreterPath("venv")) } @@ -568,18 +642,17 @@ func TestExplainProcessErr(t *testing.T) { stderr := "/home/test/.venv/bin/python3: Error while finding module specification for 'databricks.bundles.build' (ModuleNotFoundError: No module named 'databricks')\n" expected := `/home/test/.venv/bin/python3: Error while finding module specification for 'databricks.bundles.build' (ModuleNotFoundError: No module named 'databricks') -Explanation: 'databricks-pydabs' library is not installed in the Python environment. +Explanation: 'databricks-bundles' library is not installed in the Python environment. -If using Python wheels, ensure that 'databricks-pydabs' is included in the dependencies, -and that the wheel is installed in the Python environment: +Ensure that 'databricks-bundles' is installed in Python environment: - $ .venv/bin/pip install -e . 
+ $ .venv/bin/pip install databricks-bundles If using a virtual environment, ensure it is specified as the venv_path property in databricks.yml, or activate the environment before running CLI commands: experimental: - pydabs: + python: venv_path: .venv ` @@ -588,7 +661,7 @@ or activate the environment before running CLI commands: assert.Equal(t, expected, out) } -func withProcessStub(t *testing.T, args []string, output string, diagnostics string) context.Context { +func withProcessStub(t *testing.T, args []string, output, diagnostics string) context.Context { ctx := context.Background() ctx, stub := process.WithStub(ctx) @@ -611,10 +684,10 @@ func withProcessStub(t *testing.T, args []string, output string, diagnostics str assert.NoError(t, err) if reflect.DeepEqual(actual.Args, args) { - err := os.WriteFile(outputPath, []byte(output), 0600) + err := os.WriteFile(outputPath, []byte(output), 0o600) require.NoError(t, err) - err = os.WriteFile(diagnosticsPath, []byte(diagnostics), 0600) + err = os.WriteFile(diagnosticsPath, []byte(diagnostics), 0o600) require.NoError(t, err) return nil @@ -626,7 +699,7 @@ func withProcessStub(t *testing.T, args []string, output string, diagnostics str return ctx } -func loadYaml(name string, content string) *bundle.Bundle { +func loadYaml(name, content string) *bundle.Bundle { v, diag := config.LoadFromBytes(name, []byte(content)) if diag.Error() != nil { @@ -650,17 +723,17 @@ func withFakeVEnv(t *testing.T, venvPath string) { interpreterPath := interpreterPath(venvPath) - err = os.MkdirAll(filepath.Dir(interpreterPath), 0755) + err = os.MkdirAll(filepath.Dir(interpreterPath), 0o755) if err != nil { panic(err) } - err = os.WriteFile(interpreterPath, []byte(""), 0755) + err = os.WriteFile(interpreterPath, []byte(""), 0o755) if err != nil { panic(err) } - err = os.WriteFile(filepath.Join(venvPath, "pyvenv.cfg"), []byte(""), 0755) + err = os.WriteFile(filepath.Join(venvPath, "pyvenv.cfg"), []byte(""), 0o755) if err != nil { panic(err) } @@ -674,7 +747,7 @@ func withFakeVEnv(t *testing.T, venvPath string) { func interpreterPath(venvPath string) string { if runtime.GOOS == "windows" { - return filepath.Join(venvPath, "Scripts", "python3.exe") + return filepath.Join(venvPath, "Scripts", "python.exe") } else { return filepath.Join(venvPath, "bin", "python3") } diff --git a/bundle/config/mutator/resolve_resource_references.go b/bundle/config/mutator/resolve_resource_references.go index 89eaa346c..20a5b6585 100644 --- a/bundle/config/mutator/resolve_resource_references.go +++ b/bundle/config/mutator/resolve_resource_references.go @@ -36,11 +36,11 @@ func (m *resolveResourceReferences) Apply(ctx context.Context, b *bundle.Bundle) return fmt.Errorf("failed to resolve %s, err: %w", v.Lookup, err) } - v.Set(id) - return nil + return v.Set(id) }) } + // Note, diags are lost from all goroutines except the first one to return diag return diag.FromErr(errs.Wait()) } diff --git a/bundle/config/mutator/resolve_resource_references_test.go b/bundle/config/mutator/resolve_resource_references_test.go index ee2f0e2ea..624e337c7 100644 --- a/bundle/config/mutator/resolve_resource_references_test.go +++ b/bundle/config/mutator/resolve_resource_references_test.go @@ -108,7 +108,8 @@ func TestNoLookupIfVariableIsSet(t *testing.T) { m := mocks.NewMockWorkspaceClient(t) b.SetWorkpaceClient(m.WorkspaceClient) - b.Config.Variables["my-cluster-id"].Set("random value") + err := b.Config.Variables["my-cluster-id"].Set("random value") + require.NoError(t, err) diags := 
bundle.Apply(context.Background(), b, ResolveResourceReferences()) require.NoError(t, diags.Error()) diff --git a/bundle/config/mutator/resolve_variable_references.go b/bundle/config/mutator/resolve_variable_references.go index 5e5b76109..7ad3dfd8d 100644 --- a/bundle/config/mutator/resolve_variable_references.go +++ b/bundle/config/mutator/resolve_variable_references.go @@ -2,9 +2,10 @@ package mutator import ( "context" - "fmt" + "errors" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/variable" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" @@ -15,7 +16,7 @@ import ( type resolveVariableReferences struct { prefixes []string pattern dyn.Pattern - lookupFn func(dyn.Value, dyn.Path) (dyn.Value, error) + lookupFn func(dyn.Value, dyn.Path, *bundle.Bundle) (dyn.Value, error) skipFn func(dyn.Value) bool } @@ -32,27 +33,33 @@ func ResolveVariableReferencesInLookup() bundle.Mutator { } func ResolveVariableReferencesInComplexVariables() bundle.Mutator { - return &resolveVariableReferences{prefixes: []string{ - "bundle", - "workspace", - "variables", - }, + return &resolveVariableReferences{ + prefixes: []string{ + "bundle", + "workspace", + "variables", + }, pattern: dyn.NewPattern(dyn.Key("variables"), dyn.AnyKey(), dyn.Key("value")), lookupFn: lookupForComplexVariables, skipFn: skipResolvingInNonComplexVariables, } } -func lookup(v dyn.Value, path dyn.Path) (dyn.Value, error) { +func lookup(v dyn.Value, path dyn.Path, b *bundle.Bundle) (dyn.Value, error) { + if config.IsExplicitlyEnabled(b.Config.Presets.SourceLinkedDeployment) { + if path.String() == "workspace.file_path" { + return dyn.V(b.SyncRootPath), nil + } + } // Future opportunity: if we lookup this path in both the given root // and the synthesized root, we know if it was explicitly set or implied to be empty. // Then we can emit a warning if it was not explicitly set. 
return dyn.GetByPath(v, path) } -func lookupForComplexVariables(v dyn.Value, path dyn.Path) (dyn.Value, error) { +func lookupForComplexVariables(v dyn.Value, path dyn.Path, b *bundle.Bundle) (dyn.Value, error) { if path[0].Key() != "variables" { - return lookup(v, path) + return lookup(v, path, b) } varV, err := dyn.GetByPath(v, path[:len(path)-1]) @@ -67,10 +74,10 @@ func lookupForComplexVariables(v dyn.Value, path dyn.Path) (dyn.Value, error) { } if vv.Type == variable.VariableTypeComplex { - return dyn.InvalidValue, fmt.Errorf("complex variables cannot contain references to another complex variables") + return dyn.InvalidValue, errors.New("complex variables cannot contain references to another complex variables") } - return lookup(v, path) + return lookup(v, path, b) } func skipResolvingInNonComplexVariables(v dyn.Value) bool { @@ -82,9 +89,9 @@ func skipResolvingInNonComplexVariables(v dyn.Value) bool { } } -func lookupForVariables(v dyn.Value, path dyn.Path) (dyn.Value, error) { +func lookupForVariables(v dyn.Value, path dyn.Path, b *bundle.Bundle) (dyn.Value, error) { if path[0].Key() != "variables" { - return lookup(v, path) + return lookup(v, path, b) } varV, err := dyn.GetByPath(v, path[:len(path)-1]) @@ -99,10 +106,10 @@ func lookupForVariables(v dyn.Value, path dyn.Path) (dyn.Value, error) { } if vv.Lookup != nil && vv.Lookup.String() != "" { - return dyn.InvalidValue, fmt.Errorf("lookup variables cannot contain references to another lookup variables") + return dyn.InvalidValue, errors.New("lookup variables cannot contain references to another lookup variables") } - return lookup(v, path) + return lookup(v, path, b) } func (*resolveVariableReferences) Name() string { @@ -124,6 +131,7 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) varPath := dyn.NewPath(dyn.Key("var")) var diags diag.Diagnostics + err := b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) { // Synthesize a copy of the root that has all fields that are present in the type // but not set in the dynamic value set to their corresponding empty value. 
@@ -166,14 +174,13 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) if m.skipFn != nil && m.skipFn(v) { return dyn.InvalidValue, dynvar.ErrSkipResolution } - return m.lookupFn(normalized, path) + return m.lookupFn(normalized, path, b) } } return dyn.InvalidValue, dynvar.ErrSkipResolution }) }) - if err != nil { return dyn.InvalidValue, err } @@ -184,7 +191,6 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) diags = diags.Extend(normaliseDiags) return root, nil }) - if err != nil { diags = diags.Extend(diag.FromErr(err)) } diff --git a/bundle/config/mutator/resolve_variable_references_test.go b/bundle/config/mutator/resolve_variable_references_test.go index 7bb6f11a0..18bb022aa 100644 --- a/bundle/config/mutator/resolve_variable_references_test.go +++ b/bundle/config/mutator/resolve_variable_references_test.go @@ -12,36 +12,11 @@ import ( "github.com/databricks/cli/libs/dyn" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/databricks/databricks-sdk-go/service/pipelines" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestResolveVariableReferences(t *testing.T) { - b := &bundle.Bundle{ - Config: config.Root{ - Bundle: config.Bundle{ - Name: "example", - }, - Workspace: config.Workspace{ - RootPath: "${bundle.name}/bar", - FilePath: "${workspace.root_path}/baz", - }, - }, - } - - // Apply with an invalid prefix. This should not change the workspace root path. - diags := bundle.Apply(context.Background(), b, ResolveVariableReferences("doesntexist")) - require.NoError(t, diags.Error()) - require.Equal(t, "${bundle.name}/bar", b.Config.Workspace.RootPath) - require.Equal(t, "${workspace.root_path}/baz", b.Config.Workspace.FilePath) - - // Apply with a valid prefix. This should change the workspace root path. - diags = bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle", "workspace")) - require.NoError(t, diags.Error()) - require.Equal(t, "example/bar", b.Config.Workspace.RootPath) - require.Equal(t, "example/bar/baz", b.Config.Workspace.FilePath) -} - func TestResolveVariableReferencesToBundleVariables(t *testing.T) { b := &bundle.Bundle{ Config: config.Root{ @@ -65,37 +40,6 @@ func TestResolveVariableReferencesToBundleVariables(t *testing.T) { require.Equal(t, "example/bar", b.Config.Workspace.RootPath) } -func TestResolveVariableReferencesToEmptyFields(t *testing.T) { - b := &bundle.Bundle{ - Config: config.Root{ - Bundle: config.Bundle{ - Name: "example", - Git: config.Git{ - Branch: "", - }, - }, - Resources: config.Resources{ - Jobs: map[string]*resources.Job{ - "job1": { - JobSettings: &jobs.JobSettings{ - Tags: map[string]string{ - "git_branch": "${bundle.git.branch}", - }, - }, - }, - }, - }, - }, - } - - // Apply for the bundle prefix. - diags := bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle")) - require.NoError(t, diags.Error()) - - // The job settings should have been interpolated to an empty string. - require.Equal(t, "", b.Config.Resources.Jobs["job1"].JobSettings.Tags["git_branch"]) -} - func TestResolveVariableReferencesForPrimitiveNonStringFields(t *testing.T) { var diags diag.Diagnostics @@ -185,11 +129,11 @@ func TestResolveVariableReferencesForPrimitiveNonStringFields(t *testing.T) { // Apply for the variable prefix. This should resolve the variables to their values. 
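Both the test cases removed above and the ones that remain exercise the same core behaviour: a `${...}` reference is substituted only when its first path component matches one of the mutator's configured prefixes, and anything else is left untouched (that is what the removed `TestResolveVariableReferences` checked with an invalid prefix). A self-contained, single-pass approximation using a regular expression — the real implementation walks `dyn.Value` trees via `dynvar`, so this is only the shape of the idea:

```go
package main

import (
	"fmt"
	"regexp"
	"slices"
	"strings"
)

var ref = regexp.MustCompile(`\$\{([a-z_][a-z0-9._\[\]]*)\}`)

// resolveRefs substitutes ${a.b.c} references from values, but only when the
// first path component is one of the allowed prefixes.
func resolveRefs(s string, values map[string]string, prefixes []string) string {
	return ref.ReplaceAllStringFunc(s, func(m string) string {
		path := ref.FindStringSubmatch(m)[1]
		head := strings.SplitN(path, ".", 2)[0]
		if !slices.Contains(prefixes, head) {
			return m // prefix not enabled: leave the reference as-is
		}
		if v, ok := values[path]; ok {
			return v
		}
		return m
	})
}

func main() {
	values := map[string]string{"bundle.name": "example"}
	fmt.Println(resolveRefs("${bundle.name}/bar", values, []string{"doesntexist"})) // unchanged
	fmt.Println(resolveRefs("${bundle.name}/bar", values, []string{"bundle"}))      // example/bar
}
```

The `bundle.Apply` call that follows picks up from the comment just above and resolves the `variables` prefix in the kept test.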
diags = bundle.Apply(context.Background(), b, ResolveVariableReferences("variables")) require.NoError(t, diags.Error()) - assert.Equal(t, true, b.Config.Resources.Jobs["job1"].JobSettings.NotificationSettings.NoAlertForCanceledRuns) - assert.Equal(t, true, b.Config.Resources.Jobs["job1"].JobSettings.NotificationSettings.NoAlertForSkippedRuns) + assert.True(t, b.Config.Resources.Jobs["job1"].JobSettings.NotificationSettings.NoAlertForCanceledRuns) + assert.True(t, b.Config.Resources.Jobs["job1"].JobSettings.NotificationSettings.NoAlertForSkippedRuns) assert.Equal(t, 1, b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].NewCluster.Autoscale.MinWorkers) assert.Equal(t, 2, b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].NewCluster.Autoscale.MaxWorkers) - assert.Equal(t, 0.5, b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].NewCluster.AzureAttributes.SpotBidMaxPrice) + assert.InDelta(t, 0.5, b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].NewCluster.AzureAttributes.SpotBidMaxPrice, 0.0001) } func TestResolveComplexVariable(t *testing.T) { @@ -250,63 +194,6 @@ func TestResolveComplexVariable(t *testing.T) { require.Equal(t, 2, b.Config.Resources.Jobs["job1"].JobSettings.JobClusters[0].NewCluster.NumWorkers) } -func TestResolveComplexVariableReferencesToFields(t *testing.T) { - b := &bundle.Bundle{ - Config: config.Root{ - Bundle: config.Bundle{ - Name: "example", - }, - Variables: map[string]*variable.Variable{ - "cluster": { - Value: map[string]any{ - "node_type_id": "Standard_DS3_v2", - "num_workers": 2, - }, - Type: variable.VariableTypeComplex, - }, - }, - - Resources: config.Resources{ - Jobs: map[string]*resources.Job{ - "job1": { - JobSettings: &jobs.JobSettings{ - JobClusters: []jobs.JobCluster{ - { - NewCluster: compute.ClusterSpec{ - NodeTypeId: "random", - }, - }, - }, - }, - }, - }, - }, - }, - } - - ctx := context.Background() - - // Assign the variables to the dynamic configuration. 
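The assertion rewrites at the top of this hunk follow the usual testify conventions enforced by the linters: pass the expected value first, prefer the intent-revealing helpers (`assert.True`, `require.Len`, `require.Empty`), and compare floats with a tolerance instead of exact equality. A small, hypothetical test illustrating each form (not code from the repo):

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestAssertionStyle(t *testing.T) {
	enabled := true
	price := 0.1 + 0.2 // 0.30000000000000004 in float64
	diags := []string{"only warning"}

	assert.True(t, enabled)               // instead of assert.Equal(t, true, enabled)
	assert.Equal(t, 3, 1+2)               // expected first, actual second
	assert.InDelta(t, 0.3, price, 0.0001) // floats compared with a tolerance
	require.Len(t, diags, 1)              // instead of require.Equal(t, 1, len(diags))
	require.Empty(t, diags[1:])           // instead of checking len() == 0
}
```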
- diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { - var p dyn.Path - var err error - - p = dyn.MustPathFromString("resources.jobs.job1.job_clusters[0].new_cluster") - v, err = dyn.SetByPath(v, p.Append(dyn.Key("node_type_id")), dyn.V("${var.cluster.node_type_id}")) - require.NoError(t, err) - - return v, nil - }) - return diag.FromErr(err) - }) - require.NoError(t, diags.Error()) - - diags = bundle.Apply(ctx, b, ResolveVariableReferences("bundle", "workspace", "variables")) - require.NoError(t, diags.Error()) - require.Equal(t, "Standard_DS3_v2", b.Config.Resources.Jobs["job1"].JobSettings.JobClusters[0].NewCluster.NodeTypeId) -} - func TestResolveComplexVariableReferencesWithComplexVariablesError(t *testing.T) { b := &bundle.Bundle{ Config: config.Root{ @@ -434,3 +321,57 @@ func TestResolveComplexVariableWithVarReference(t *testing.T) { require.NoError(t, diags.Error()) require.Equal(t, "cicd_template==1.0.0", b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].Libraries[0].Pypi.Package) } + +func TestResolveVariableReferencesWithSourceLinkedDeployment(t *testing.T) { + testCases := []struct { + enabled bool + assert func(t *testing.T, b *bundle.Bundle) + }{ + { + true, + func(t *testing.T, b *bundle.Bundle) { + // Variables that use workspace file path should have SyncRootValue during resolution phase + require.Equal(t, "sync/root/path", b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Configuration["source"]) + + // The file path itself should remain the same + require.Equal(t, "file/path", b.Config.Workspace.FilePath) + }, + }, + { + false, + func(t *testing.T, b *bundle.Bundle) { + require.Equal(t, "file/path", b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Configuration["source"]) + require.Equal(t, "file/path", b.Config.Workspace.FilePath) + }, + }, + } + + for _, testCase := range testCases { + b := &bundle.Bundle{ + SyncRootPath: "sync/root/path", + Config: config.Root{ + Presets: config.Presets{ + SourceLinkedDeployment: &testCase.enabled, + }, + Workspace: config.Workspace{ + FilePath: "file/path", + }, + Resources: config.Resources{ + Pipelines: map[string]*resources.Pipeline{ + "pipeline1": { + PipelineSpec: &pipelines.PipelineSpec{ + Configuration: map[string]string{ + "source": "${workspace.file_path}", + }, + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, ResolveVariableReferences("workspace")) + require.NoError(t, diags.Error()) + testCase.assert(t, b) + } +} diff --git a/bundle/config/mutator/rewrite_workspace_prefix.go b/bundle/config/mutator/rewrite_workspace_prefix.go index 8a39ee8a1..0ccb3314b 100644 --- a/bundle/config/mutator/rewrite_workspace_prefix.go +++ b/bundle/config/mutator/rewrite_workspace_prefix.go @@ -63,7 +63,6 @@ func (m *rewriteWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) di return v, nil }) }) - if err != nil { return diag.FromErr(err) } diff --git a/bundle/config/mutator/rewrite_workspace_prefix_test.go b/bundle/config/mutator/rewrite_workspace_prefix_test.go index d75ec89db..099738c02 100644 --- a/bundle/config/mutator/rewrite_workspace_prefix_test.go +++ b/bundle/config/mutator/rewrite_workspace_prefix_test.go @@ -71,7 +71,7 @@ func TestNoWorkspacePrefixUsed(t *testing.T) { } for _, d := range diags { - require.Equal(t, d.Severity, diag.Warning) + require.Equal(t, diag.Warning, d.Severity) require.Contains(t, expectedErrors, d.Summary) delete(expectedErrors, d.Summary) } @@ 
-81,5 +81,4 @@ func TestNoWorkspacePrefixUsed(t *testing.T) { require.Equal(t, "${workspace.artifact_path}/jar1.jar", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[1].Libraries[0].Jar) require.Equal(t, "${workspace.file_path}/notebook2", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[2].NotebookTask.NotebookPath) require.Equal(t, "${workspace.artifact_path}/jar2.jar", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[2].Libraries[0].Jar) - } diff --git a/bundle/config/mutator/run_as.go b/bundle/config/mutator/run_as.go index 0ca71e28e..7ffd782c2 100644 --- a/bundle/config/mutator/run_as.go +++ b/bundle/config/mutator/run_as.go @@ -12,8 +12,7 @@ import ( "github.com/databricks/databricks-sdk-go/service/jobs" ) -type setRunAs struct { -} +type setRunAs struct{} // This mutator does two things: // @@ -30,7 +29,7 @@ func (m *setRunAs) Name() string { return "SetRunAs" } -func reportRunAsNotSupported(resourceType string, location dyn.Location, currentUser string, runAsUser string) diag.Diagnostics { +func reportRunAsNotSupported(resourceType string, location dyn.Location, currentUser, runAsUser string) diag.Diagnostics { return diag.Diagnostics{{ Summary: fmt.Sprintf("%s do not support a setting a run_as user that is different from the owner.\n"+ "Current identity: %s. Run as identity: %s.\n"+ diff --git a/bundle/config/mutator/set_variables.go b/bundle/config/mutator/set_variables.go index 47ce2ad03..9e9f2dcfe 100644 --- a/bundle/config/mutator/set_variables.go +++ b/bundle/config/mutator/set_variables.go @@ -65,7 +65,6 @@ func setVariable(ctx context.Context, v dyn.Value, variable *variable.Variable, // We should have had a value to set for the variable at this point. return dyn.InvalidValue, fmt.Errorf(`no value assigned to required variable %s. 
Assignment can be done through the "--var" flag or by setting the %s environment variable`, name, bundleVarPrefix+name) - } func (m *setVariables) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { diff --git a/bundle/config/mutator/set_variables_test.go b/bundle/config/mutator/set_variables_test.go index d9719793f..07a5c8214 100644 --- a/bundle/config/mutator/set_variables_test.go +++ b/bundle/config/mutator/set_variables_test.go @@ -30,7 +30,7 @@ func TestSetVariableFromProcessEnvVar(t *testing.T) { err = convert.ToTyped(&variable, v) require.NoError(t, err) - assert.Equal(t, variable.Value, "process-env") + assert.Equal(t, "process-env", variable.Value) } func TestSetVariableUsingDefaultValue(t *testing.T) { @@ -48,7 +48,7 @@ func TestSetVariableUsingDefaultValue(t *testing.T) { err = convert.ToTyped(&variable, v) require.NoError(t, err) - assert.Equal(t, variable.Value, "default") + assert.Equal(t, "default", variable.Value) } func TestSetVariableWhenAlreadyAValueIsAssigned(t *testing.T) { @@ -70,7 +70,7 @@ func TestSetVariableWhenAlreadyAValueIsAssigned(t *testing.T) { err = convert.ToTyped(&variable, v) require.NoError(t, err) - assert.Equal(t, variable.Value, "assigned-value") + assert.Equal(t, "assigned-value", variable.Value) } func TestSetVariableEnvVarValueDoesNotOverridePresetValue(t *testing.T) { @@ -95,7 +95,7 @@ func TestSetVariableEnvVarValueDoesNotOverridePresetValue(t *testing.T) { err = convert.ToTyped(&variable, v) require.NoError(t, err) - assert.Equal(t, variable.Value, "assigned-value") + assert.Equal(t, "assigned-value", variable.Value) } func TestSetVariablesErrorsIfAValueCouldNotBeResolved(t *testing.T) { diff --git a/bundle/config/mutator/sync_infer_root.go b/bundle/config/mutator/sync_infer_root.go index 512adcdbf..160fcc908 100644 --- a/bundle/config/mutator/sync_infer_root.go +++ b/bundle/config/mutator/sync_infer_root.go @@ -35,7 +35,7 @@ func (m *syncInferRoot) Name() string { // If the path does not exist, it returns an empty string. // // See "sync_infer_root_internal_test.go" for examples. -func (m *syncInferRoot) computeRoot(path string, root string) string { +func (m *syncInferRoot) computeRoot(path, root string) string { for !filepath.IsLocal(path) { // Break if we have reached the root of the filesystem. dir := filepath.Dir(root) diff --git a/bundle/config/mutator/translate_paths.go b/bundle/config/mutator/translate_paths.go index 5e016d8a1..af0f94120 100644 --- a/bundle/config/mutator/translate_paths.go +++ b/bundle/config/mutator/translate_paths.go @@ -275,8 +275,8 @@ func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnos } func gatherFallbackPaths(v dyn.Value, typ string) (map[string]string, error) { - var fallback = make(map[string]string) - var pattern = dyn.NewPattern(dyn.Key("resources"), dyn.Key(typ), dyn.AnyKey()) + fallback := make(map[string]string) + pattern := dyn.NewPattern(dyn.Key("resources"), dyn.Key(typ), dyn.AnyKey()) // Previous behavior was to use a resource's location as the base path to resolve // relative paths in its definition. 
With the introduction of [dyn.Value] throughout, diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index bf6ba15d8..493abb8c5 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -28,12 +28,13 @@ import ( func touchNotebookFile(t *testing.T, path string) { f, err := os.Create(path) require.NoError(t, err) - f.WriteString("# Databricks notebook source\n") + _, err = f.WriteString("# Databricks notebook source\n") + require.NoError(t, err) f.Close() } func touchEmptyFile(t *testing.T, path string) { - err := os.MkdirAll(filepath.Dir(path), 0700) + err := os.MkdirAll(filepath.Dir(path), 0o700) require.NoError(t, err) f, err := os.Create(path) require.NoError(t, err) diff --git a/bundle/config/mutator/verify_cli_version.go b/bundle/config/mutator/verify_cli_version.go index 279af44e6..873e4f780 100644 --- a/bundle/config/mutator/verify_cli_version.go +++ b/bundle/config/mutator/verify_cli_version.go @@ -15,8 +15,7 @@ func VerifyCliVersion() bundle.Mutator { return &verifyCliVersion{} } -type verifyCliVersion struct { -} +type verifyCliVersion struct{} func (v *verifyCliVersion) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // No constraints specified, skip the check. diff --git a/bundle/config/presets.go b/bundle/config/presets.go index 30f56c0f8..252c5b5f7 100644 --- a/bundle/config/presets.go +++ b/bundle/config/presets.go @@ -1,7 +1,9 @@ package config -const Paused = "PAUSED" -const Unpaused = "UNPAUSED" +const ( + Paused = "PAUSED" + Unpaused = "UNPAUSED" +) type Presets struct { // NamePrefix to prepend to all resource names. diff --git a/bundle/config/resources/clusters.go b/bundle/config/resources/clusters.go index ba991e865..073f40a79 100644 --- a/bundle/config/resources/clusters.go +++ b/bundle/config/resources/clusters.go @@ -2,7 +2,6 @@ package resources import ( "context" - "fmt" "net/url" "github.com/databricks/cli/libs/log" @@ -45,7 +44,7 @@ func (s *Cluster) InitializeURL(baseURL url.URL) { if s.ID == "" { return } - baseURL.Path = fmt.Sprintf("compute/clusters/%s", s.ID) + baseURL.Path = "compute/clusters/" + s.ID s.URL = baseURL.String() } diff --git a/bundle/config/resources/job.go b/bundle/config/resources/job.go index 0aa41b2e8..76de78439 100644 --- a/bundle/config/resources/job.go +++ b/bundle/config/resources/job.go @@ -2,7 +2,6 @@ package resources import ( "context" - "fmt" "net/url" "strconv" @@ -52,7 +51,7 @@ func (j *Job) InitializeURL(baseURL url.URL) { if j.ID == "" { return } - baseURL.Path = fmt.Sprintf("jobs/%s", j.ID) + baseURL.Path = "jobs/" + j.ID j.URL = baseURL.String() } diff --git a/bundle/config/resources/mlflow_experiment.go b/bundle/config/resources/mlflow_experiment.go index 5d179ec0f..ea18ce114 100644 --- a/bundle/config/resources/mlflow_experiment.go +++ b/bundle/config/resources/mlflow_experiment.go @@ -2,7 +2,6 @@ package resources import ( "context" - "fmt" "net/url" "github.com/databricks/cli/libs/log" @@ -47,7 +46,7 @@ func (s *MlflowExperiment) InitializeURL(baseURL url.URL) { if s.ID == "" { return } - baseURL.Path = fmt.Sprintf("ml/experiments/%s", s.ID) + baseURL.Path = "ml/experiments/" + s.ID s.URL = baseURL.String() } diff --git a/bundle/config/resources/mlflow_model.go b/bundle/config/resources/mlflow_model.go index 72376f45d..69ae2d438 100644 --- a/bundle/config/resources/mlflow_model.go +++ b/bundle/config/resources/mlflow_model.go @@ -2,7 +2,6 @@ package resources import ( "context" - "fmt" 
"net/url" "github.com/databricks/cli/libs/log" @@ -47,7 +46,7 @@ func (s *MlflowModel) InitializeURL(baseURL url.URL) { if s.ID == "" { return } - baseURL.Path = fmt.Sprintf("ml/models/%s", s.ID) + baseURL.Path = "ml/models/" + s.ID s.URL = baseURL.String() } diff --git a/bundle/config/resources/model_serving_endpoint.go b/bundle/config/resources/model_serving_endpoint.go index a3c472b3f..8b1394d86 100644 --- a/bundle/config/resources/model_serving_endpoint.go +++ b/bundle/config/resources/model_serving_endpoint.go @@ -2,7 +2,6 @@ package resources import ( "context" - "fmt" "net/url" "github.com/databricks/cli/libs/log" @@ -55,7 +54,7 @@ func (s *ModelServingEndpoint) InitializeURL(baseURL url.URL) { if s.ID == "" { return } - baseURL.Path = fmt.Sprintf("ml/endpoints/%s", s.ID) + baseURL.Path = "ml/endpoints/" + s.ID s.URL = baseURL.String() } diff --git a/bundle/config/resources/permission.go b/bundle/config/resources/permission.go index 62e18a09e..fa1568601 100644 --- a/bundle/config/resources/permission.go +++ b/bundle/config/resources/permission.go @@ -25,5 +25,5 @@ func (p Permission) String() string { return fmt.Sprintf("level: %s, group_name: %s", p.Level, p.GroupName) } - return fmt.Sprintf("level: %s", p.Level) + return "level: " + p.Level } diff --git a/bundle/config/resources/pipeline.go b/bundle/config/resources/pipeline.go index eaa4c5368..5127d07ba 100644 --- a/bundle/config/resources/pipeline.go +++ b/bundle/config/resources/pipeline.go @@ -2,7 +2,6 @@ package resources import ( "context" - "fmt" "net/url" "github.com/databricks/cli/libs/log" @@ -47,7 +46,7 @@ func (p *Pipeline) InitializeURL(baseURL url.URL) { if p.ID == "" { return } - baseURL.Path = fmt.Sprintf("pipelines/%s", p.ID) + baseURL.Path = "pipelines/" + p.ID p.URL = baseURL.String() } diff --git a/bundle/config/resources/quality_monitor.go b/bundle/config/resources/quality_monitor.go index b1d7e08a5..88bc0a3e7 100644 --- a/bundle/config/resources/quality_monitor.go +++ b/bundle/config/resources/quality_monitor.go @@ -2,7 +2,6 @@ package resources import ( "context" - "fmt" "net/url" "strings" @@ -51,7 +50,7 @@ func (s *QualityMonitor) InitializeURL(baseURL url.URL) { if s.TableName == "" { return } - baseURL.Path = fmt.Sprintf("explore/data/%s", strings.ReplaceAll(s.TableName, ".", "/")) + baseURL.Path = "explore/data/" + strings.ReplaceAll(s.TableName, ".", "/") s.URL = baseURL.String() } diff --git a/bundle/config/resources/registered_model.go b/bundle/config/resources/registered_model.go index 8513a79ae..006eef773 100644 --- a/bundle/config/resources/registered_model.go +++ b/bundle/config/resources/registered_model.go @@ -2,7 +2,6 @@ package resources import ( "context" - "fmt" "net/url" "strings" @@ -57,7 +56,7 @@ func (s *RegisteredModel) InitializeURL(baseURL url.URL) { if s.ID == "" { return } - baseURL.Path = fmt.Sprintf("explore/data/models/%s", strings.ReplaceAll(s.ID, ".", "/")) + baseURL.Path = "explore/data/models/" + strings.ReplaceAll(s.ID, ".", "/") s.URL = baseURL.String() } diff --git a/bundle/config/resources/schema.go b/bundle/config/resources/schema.go index 8eadd7e46..b638907ac 100644 --- a/bundle/config/resources/schema.go +++ b/bundle/config/resources/schema.go @@ -2,7 +2,7 @@ package resources import ( "context" - "fmt" + "errors" "net/url" "strings" @@ -26,7 +26,7 @@ type Schema struct { } func (s *Schema) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { - return false, fmt.Errorf("schema.Exists() is not supported") + return false, 
errors.New("schema.Exists() is not supported") } func (s *Schema) TerraformResourceName() string { @@ -37,7 +37,7 @@ func (s *Schema) InitializeURL(baseURL url.URL) { if s.ID == "" { return } - baseURL.Path = fmt.Sprintf("explore/data/%s", strings.ReplaceAll(s.ID, ".", "/")) + baseURL.Path = "explore/data/" + strings.ReplaceAll(s.ID, ".", "/") s.URL = baseURL.String() } diff --git a/bundle/config/resources/volume.go b/bundle/config/resources/volume.go index cae2a3463..882b7107d 100644 --- a/bundle/config/resources/volume.go +++ b/bundle/config/resources/volume.go @@ -2,7 +2,7 @@ package resources import ( "context" - "fmt" + "errors" "net/url" "strings" @@ -34,7 +34,7 @@ func (v Volume) MarshalJSON() ([]byte, error) { } func (v *Volume) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { - return false, fmt.Errorf("volume.Exists() is not supported") + return false, errors.New("volume.Exists() is not supported") } func (v *Volume) TerraformResourceName() string { @@ -45,7 +45,7 @@ func (v *Volume) InitializeURL(baseURL url.URL) { if v.ID == "" { return } - baseURL.Path = fmt.Sprintf("explore/data/volumes/%s", strings.ReplaceAll(v.ID, ".", "/")) + baseURL.Path = "explore/data/volumes/" + strings.ReplaceAll(v.ID, ".", "/") v.URL = baseURL.String() } diff --git a/bundle/config/resources_test.go b/bundle/config/resources_test.go index 9ae73b22a..cbbcf5e27 100644 --- a/bundle/config/resources_test.go +++ b/bundle/config/resources_test.go @@ -33,15 +33,15 @@ func TestCustomMarshallerIsImplemented(t *testing.T) { r := Resources{} rt := reflect.TypeOf(r) - for i := 0; i < rt.NumField(); i++ { + for i := range rt.NumField() { field := rt.Field(i) // Fields in Resources are expected be of the form map[string]*resourceStruct - assert.Equal(t, field.Type.Kind(), reflect.Map, "Resource %s is not a map", field.Name) + assert.Equal(t, reflect.Map, field.Type.Kind(), "Resource %s is not a map", field.Name) kt := field.Type.Key() - assert.Equal(t, kt.Kind(), reflect.String, "Resource %s is not a map with string keys", field.Name) + assert.Equal(t, reflect.String, kt.Kind(), "Resource %s is not a map with string keys", field.Name) vt := field.Type.Elem() - assert.Equal(t, vt.Kind(), reflect.Ptr, "Resource %s is not a map with pointer values", field.Name) + assert.Equal(t, reflect.Ptr, vt.Kind(), "Resource %s is not a map with pointer values", field.Name) // Marshalling a resourceStruct will panic if resourceStruct does not have a custom marshaller // This is because resourceStruct embeds a Go SDK struct that implements @@ -49,7 +49,8 @@ func TestCustomMarshallerIsImplemented(t *testing.T) { // Eg: resource.Job implements MarshalJSON v := reflect.Zero(vt.Elem()).Interface() assert.NotPanics(t, func() { - json.Marshal(v) + _, err := json.Marshal(v) + assert.NoError(t, err) }, "Resource %s does not have a custom marshaller", field.Name) // Unmarshalling a *resourceStruct will panic if the resource does not have a custom unmarshaller @@ -58,7 +59,8 @@ func TestCustomMarshallerIsImplemented(t *testing.T) { // Eg: *resource.Job implements UnmarshalJSON v = reflect.New(vt.Elem()).Interface() assert.NotPanics(t, func() { - json.Unmarshal([]byte("{}"), v) + err := json.Unmarshal([]byte("{}"), v) + assert.NoError(t, err) }, "Resource %s does not have a custom unmarshaller", field.Name) } } @@ -73,7 +75,7 @@ func TestResourcesAllResourcesCompleteness(t *testing.T) { types = append(types, group.Description.PluralName) } - for i := 0; i < rt.NumField(); i++ { + for i := range 
rt.NumField() { field := rt.Field(i) jsonTag := field.Tag.Get("json") @@ -90,7 +92,7 @@ func TestSupportedResources(t *testing.T) { actual := SupportedResources() typ := reflect.TypeOf(Resources{}) - for i := 0; i < typ.NumField(); i++ { + for i := range typ.NumField() { field := typ.Field(i) jsonTags := strings.Split(field.Tag.Get("json"), ",") pluralName := jsonTags[0] diff --git a/bundle/config/root.go b/bundle/config/root.go index b2e622510..21804110a 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -102,7 +102,8 @@ func LoadFromBytes(path string, raw []byte) (*Root, diag.Diagnostics) { // Convert normalized configuration tree to typed configuration. err = r.updateWithDynamicValue(v) if err != nil { - return nil, diag.Errorf("failed to load %s: %v", path, err) + diags = diags.Extend(diag.Errorf("failed to load %s: %v", path, err)) + return nil, diags } return &r, diags } diff --git a/bundle/config/root_test.go b/bundle/config/root_test.go index a77f961bd..42fae49d9 100644 --- a/bundle/config/root_test.go +++ b/bundle/config/root_test.go @@ -100,7 +100,7 @@ func TestRootMergeTargetOverridesWithMode(t *testing.T) { }, }, } - root.initializeDynamicValue() + require.NoError(t, root.initializeDynamicValue()) require.NoError(t, root.MergeTargetOverrides("development")) assert.Equal(t, Development, root.Bundle.Mode) } @@ -156,7 +156,7 @@ func TestRootMergeTargetOverridesWithVariables(t *testing.T) { }, }, } - root.initializeDynamicValue() + require.NoError(t, root.initializeDynamicValue()) require.NoError(t, root.MergeTargetOverrides("development")) assert.Equal(t, "bar", root.Variables["foo"].Default) assert.Equal(t, "foo var", root.Variables["foo"].Description) @@ -168,7 +168,6 @@ func TestRootMergeTargetOverridesWithVariables(t *testing.T) { "key1": "value1", }, root.Variables["complex"].Default) assert.Equal(t, "complex var", root.Variables["complex"].Description) - } func TestIsFullVariableOverrideDef(t *testing.T) { @@ -252,5 +251,4 @@ func TestIsFullVariableOverrideDef(t *testing.T) { for i, tc := range testCases { assert.Equal(t, tc.expected, isFullVariableOverrideDef(tc.value), "test case %d", i) } - } diff --git a/bundle/config/validate/fast_validate.go b/bundle/config/validate/fast_validate.go new file mode 100644 index 000000000..47d83036d --- /dev/null +++ b/bundle/config/validate/fast_validate.go @@ -0,0 +1,51 @@ +package validate + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" +) + +// FastValidate runs a subset of fast validation checks. This is a subset of the full +// suite of validation mutators that satisfy ANY ONE of the following criteria: +// +// 1. No file i/o or network requests are made in the mutator. +// 2. The validation is blocking for bundle deployments. +// +// The full suite of validation mutators is available in the [Validate] mutator. +type fastValidateReadonly struct{} + +func FastValidateReadonly() bundle.ReadOnlyMutator { + return &fastValidateReadonly{} +} + +func (f *fastValidateReadonly) Name() string { + return "fast_validate(readonly)" +} + +func (f *fastValidateReadonly) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics { + return bundle.ApplyReadOnly(ctx, rb, bundle.Parallel( + // Fast mutators with only in-memory checks + JobClusterKeyDefined(), + JobTaskClusterSpec(), + SingleNodeCluster(), + + // Blocking mutators. Deployments will fail if these checks fail. 
+ ValidateArtifactPath(), + )) +} + +type fastValidate struct{} + +func FastValidate() bundle.Mutator { + return &fastValidate{} +} + +func (f *fastValidate) Name() string { + return "fast_validate" +} + +func (f *fastValidate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + return bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), FastValidateReadonly()) +} diff --git a/bundle/config/validate/files_to_sync.go b/bundle/config/validate/files_to_sync.go index a14278482..b4de06773 100644 --- a/bundle/config/validate/files_to_sync.go +++ b/bundle/config/validate/files_to_sync.go @@ -13,8 +13,7 @@ func FilesToSync() bundle.ReadOnlyMutator { return &filesToSync{} } -type filesToSync struct { -} +type filesToSync struct{} func (v *filesToSync) Name() string { return "validate:files_to_sync" diff --git a/bundle/config/validate/files_to_sync_test.go b/bundle/config/validate/files_to_sync_test.go index 30af9026d..dd40295c3 100644 --- a/bundle/config/validate/files_to_sync_test.go +++ b/bundle/config/validate/files_to_sync_test.go @@ -2,6 +2,7 @@ package validate import ( "context" + "path/filepath" "testing" "github.com/databricks/cli/bundle" @@ -81,12 +82,12 @@ func TestFilesToSync_EverythingIgnored(t *testing.T) { b := setupBundleForFilesToSyncTest(t) // Ignore all files. - testutil.WriteFile(t, "*\n.*\n", b.BundleRootPath, ".gitignore") + testutil.WriteFile(t, filepath.Join(b.BundleRootPath, ".gitignore"), "*\n.*\n") ctx := context.Background() rb := bundle.ReadOnly(b) diags := bundle.ApplyReadOnly(ctx, rb, FilesToSync()) - require.Equal(t, 1, len(diags)) + require.Len(t, diags, 1) assert.Equal(t, diag.Warning, diags[0].Severity) assert.Equal(t, "There are no files to sync, please check your .gitignore", diags[0].Summary) } @@ -100,7 +101,7 @@ func TestFilesToSync_EverythingExcluded(t *testing.T) { ctx := context.Background() rb := bundle.ReadOnly(b) diags := bundle.ApplyReadOnly(ctx, rb, FilesToSync()) - require.Equal(t, 1, len(diags)) + require.Len(t, diags, 1) assert.Equal(t, diag.Warning, diags[0].Severity) assert.Equal(t, "There are no files to sync, please check your .gitignore and sync.exclude configuration", diags[0].Summary) } diff --git a/bundle/config/validate/folder_permissions.go b/bundle/config/validate/folder_permissions.go index 505e82a1e..7b12b4b16 100644 --- a/bundle/config/validate/folder_permissions.go +++ b/bundle/config/validate/folder_permissions.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "path" + "strconv" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/libraries" @@ -15,8 +16,7 @@ import ( "golang.org/x/sync/errgroup" ) -type folderPermissions struct { -} +type folderPermissions struct{} // Apply implements bundle.ReadOnlyMutator. 
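The new `FastValidate` mutator above groups the cheap in-memory checks (plus the blocking artifact-path check) so they can run on every deployment, while `bundle validate` still adds the slower file- and network-bound checks. Because the checks are independent and read-only, they can be fanned out and their diagnostics merged; a stdlib-only sketch of that shape, with plain functions and strings standing in for the CLI's `bundle.Parallel` and `diag.Diagnostics`:

```go
package main

import (
	"fmt"
	"sync"
)

type check func() []string // returns zero or more diagnostic messages

// runParallel fans out independent read-only checks and merges everything
// they report — unlike errgroup.Group, whose Wait() surfaces only the first
// error, which is the limitation the folder_permissions comment below notes.
func runParallel(checks ...check) []string {
	var (
		mu    sync.Mutex
		diags []string
		wg    sync.WaitGroup
	)
	for _, c := range checks {
		wg.Add(1)
		go func(c check) {
			defer wg.Done()
			d := c()
			mu.Lock()
			diags = append(diags, d...)
			mu.Unlock()
		}(c)
	}
	wg.Wait()
	return diags
}

func main() {
	fast := runParallel(
		func() []string { return nil },                                   // e.g. job_cluster_key_defined: ok
		func() []string { return []string{"single node cluster warning"} }, // one diagnostic
	)
	fmt.Println(fast)
}
```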
func (f *folderPermissions) Apply(ctx context.Context, b bundle.ReadOnlyBundle) diag.Diagnostics { @@ -37,7 +37,8 @@ func (f *folderPermissions) Apply(ctx context.Context, b bundle.ReadOnlyBundle) } if err := g.Wait(); err != nil { - return diag.FromErr(err) + // Note, only diag from first coroutine is captured, others are lost + diags = diags.Extend(diag.FromErr(err)) } for _, r := range results { @@ -60,7 +61,7 @@ func checkFolderPermission(ctx context.Context, b bundle.ReadOnlyBundle, folderP } objPermissions, err := w.GetPermissions(ctx, workspace.GetWorkspaceObjectPermissionsRequest{ - WorkspaceObjectId: fmt.Sprint(obj.ObjectId), + WorkspaceObjectId: strconv.FormatInt(obj.ObjectId, 10), WorkspaceObjectType: "directories", }) if err != nil { diff --git a/bundle/config/validate/job_cluster_key_defined.go b/bundle/config/validate/job_cluster_key_defined.go index 368c3edb1..c3a1ab3df 100644 --- a/bundle/config/validate/job_cluster_key_defined.go +++ b/bundle/config/validate/job_cluster_key_defined.go @@ -13,8 +13,7 @@ func JobClusterKeyDefined() bundle.ReadOnlyMutator { return &jobClusterKeyDefined{} } -type jobClusterKeyDefined struct { -} +type jobClusterKeyDefined struct{} func (v *jobClusterKeyDefined) Name() string { return "validate:job_cluster_key_defined" diff --git a/bundle/config/validate/job_cluster_key_defined_test.go b/bundle/config/validate/job_cluster_key_defined_test.go index 176b0fedc..2cbdb7c6a 100644 --- a/bundle/config/validate/job_cluster_key_defined_test.go +++ b/bundle/config/validate/job_cluster_key_defined_test.go @@ -34,7 +34,7 @@ func TestJobClusterKeyDefined(t *testing.T) { } diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), JobClusterKeyDefined()) - require.Len(t, diags, 0) + require.Empty(t, diags) require.NoError(t, diags.Error()) } @@ -59,8 +59,8 @@ func TestJobClusterKeyNotDefined(t *testing.T) { diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), JobClusterKeyDefined()) require.Len(t, diags, 1) require.NoError(t, diags.Error()) - require.Equal(t, diags[0].Severity, diag.Warning) - require.Equal(t, diags[0].Summary, "job_cluster_key do-not-exist is not defined") + require.Equal(t, diag.Warning, diags[0].Severity) + require.Equal(t, "job_cluster_key do-not-exist is not defined", diags[0].Summary) } func TestJobClusterKeyDefinedInDifferentJob(t *testing.T) { @@ -92,6 +92,6 @@ func TestJobClusterKeyDefinedInDifferentJob(t *testing.T) { diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), JobClusterKeyDefined()) require.Len(t, diags, 1) require.NoError(t, diags.Error()) - require.Equal(t, diags[0].Severity, diag.Warning) - require.Equal(t, diags[0].Summary, "job_cluster_key do-not-exist is not defined") + require.Equal(t, diag.Warning, diags[0].Severity) + require.Equal(t, "job_cluster_key do-not-exist is not defined", diags[0].Summary) } diff --git a/bundle/config/validate/job_task_cluster_spec.go b/bundle/config/validate/job_task_cluster_spec.go index b80befcdf..5f532acfe 100644 --- a/bundle/config/validate/job_task_cluster_spec.go +++ b/bundle/config/validate/job_task_cluster_spec.go @@ -17,8 +17,7 @@ func JobTaskClusterSpec() bundle.ReadOnlyMutator { return &jobTaskClusterSpec{} } -type jobTaskClusterSpec struct { -} +type jobTaskClusterSpec struct{} func (v *jobTaskClusterSpec) Name() string { return "validate:job_task_cluster_spec" diff --git a/bundle/config/validate/single_node_cluster_test.go b/bundle/config/validate/single_node_cluster_test.go index 18771cc00..c3ead8ef6 100644 --- 
a/bundle/config/validate/single_node_cluster_test.go +++ b/bundle/config/validate/single_node_cluster_test.go @@ -175,7 +175,6 @@ func TestValidateSingleNodeClusterFailForJobClusters(t *testing.T) { Paths: []dyn.Path{dyn.MustPathFromString("resources.jobs.foo.job_clusters[0].new_cluster")}, }, }, diags) - }) } } diff --git a/bundle/config/validate/unique_resource_keys.go b/bundle/config/validate/unique_resource_keys.go index 50295375b..d80c5d632 100644 --- a/bundle/config/validate/unique_resource_keys.go +++ b/bundle/config/validate/unique_resource_keys.go @@ -2,7 +2,6 @@ package validate import ( "context" - "fmt" "sort" "github.com/databricks/cli/bundle" @@ -102,7 +101,7 @@ func (m *uniqueResourceKeys) Apply(ctx context.Context, b *bundle.Bundle) diag.D // If there are multiple resources with the same key, report an error. diags = append(diags, diag.Diagnostic{ Severity: diag.Error, - Summary: fmt.Sprintf("multiple resources have been defined with the same key: %s", k), + Summary: "multiple resources have been defined with the same key: " + k, Locations: v.locations, Paths: v.paths, }) diff --git a/bundle/config/validate/validate.go b/bundle/config/validate/validate.go index eb4c3c3cd..8fdd704ab 100644 --- a/bundle/config/validate/validate.go +++ b/bundle/config/validate/validate.go @@ -8,8 +8,7 @@ import ( "github.com/databricks/cli/libs/dyn" ) -type validate struct { -} +type validate struct{} type location struct { path string @@ -31,12 +30,13 @@ func (l location) Path() dyn.Path { // Apply implements bundle.Mutator. func (v *validate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { return bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), bundle.Parallel( - JobClusterKeyDefined(), + FastValidateReadonly(), + + // Slow mutators that require network or file i/o. These are only + // run in the `bundle validate` command. FilesToSync(), - ValidateSyncPatterns(), - JobTaskClusterSpec(), ValidateFolderPermissions(), - SingleNodeCluster(), + ValidateSyncPatterns(), )) } diff --git a/bundle/config/validate/validate_artifact_path.go b/bundle/config/validate/validate_artifact_path.go new file mode 100644 index 000000000..aa4492670 --- /dev/null +++ b/bundle/config/validate/validate_artifact_path.go @@ -0,0 +1,129 @@ +package validate + +import ( + "context" + "errors" + "fmt" + "slices" + "strings" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/libraries" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/dynvar" + "github.com/databricks/databricks-sdk-go/apierr" +) + +type validateArtifactPath struct{} + +func ValidateArtifactPath() bundle.ReadOnlyMutator { + return &validateArtifactPath{} +} + +func (v *validateArtifactPath) Name() string { + return "validate:artifact_paths" +} + +func extractVolumeFromPath(artifactPath string) (string, string, string, error) { + if !libraries.IsVolumesPath(artifactPath) { + return "", "", "", fmt.Errorf("expected artifact_path to start with /Volumes/, got %s", artifactPath) + } + + parts := strings.Split(artifactPath, "/") + volumeFormatErr := fmt.Errorf("expected UC volume path to be in the format /Volumes////..., got %s", artifactPath) + + // Incorrect format. + if len(parts) < 5 { + return "", "", "", volumeFormatErr + } + + catalogName := parts[2] + schemaName := parts[3] + volumeName := parts[4] + + // Incorrect format. 
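The parser introduced above is strict about the `/Volumes/<catalog>/<schema>/<volume>/...` shape: a path that is too short, or has empty components, is rejected with the same format error. A runnable distillation with a couple of probe inputs (names and the exact error wording here are illustrative, not the repo's `libraries` helpers):

```go
package main

import (
	"fmt"
	"strings"
)

// splitVolumePath mirrors extractVolumeFromPath: the artifact path must look
// like /Volumes/<catalog>/<schema>/<volume>/..., with no empty components.
func splitVolumePath(p string) (catalog, schema, volume string, err error) {
	if !strings.HasPrefix(p, "/Volumes/") {
		return "", "", "", fmt.Errorf("expected artifact_path to start with /Volumes/, got %s", p)
	}
	parts := strings.Split(p, "/")
	if len(parts) < 5 || parts[2] == "" || parts[3] == "" || parts[4] == "" {
		return "", "", "", fmt.Errorf("expected UC volume path to be in the format /Volumes/<catalog>/<schema>/<volume>/..., got %s", p)
	}
	return parts[2], parts[3], parts[4], nil
}

func main() {
	for _, p := range []string{
		"/Volumes/main/my_schema/my_volume/wheels",
		"/Volumes/main/my_schema",       // too short
		"/Volumes//my_schema/my_volume", // empty catalog component
	} {
		c, s, v, err := splitVolumePath(p)
		fmt.Println(p, "=>", c, s, v, err)
	}
}
```

The empty-component check that follows in the diff completes the same validation.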
+ if catalogName == "" || schemaName == "" || volumeName == "" { + return "", "", "", volumeFormatErr + } + + return catalogName, schemaName, volumeName, nil +} + +func findVolumeInBundle(r config.Root, catalogName, schemaName, volumeName string) (dyn.Path, []dyn.Location, bool) { + volumes := r.Resources.Volumes + for k, v := range volumes { + if v.CatalogName != catalogName || v.Name != volumeName { + continue + } + // UC schemas can be defined in the bundle itself, and thus might be interpolated + // at runtime via the ${resources.schemas.} syntax. Thus we match the volume + // definition if the schema name is the same as the one in the bundle, or if the + // schema name is interpolated. + // We only have to check for ${resources.schemas...} references because any + // other valid reference (like ${var.foo}) would have been interpolated by this point. + p, ok := dynvar.PureReferenceToPath(v.SchemaName) + isSchemaDefinedInBundle := ok && p.HasPrefix(dyn.Path{dyn.Key("resources"), dyn.Key("schemas")}) + if v.SchemaName != schemaName && !isSchemaDefinedInBundle { + continue + } + pathString := "resources.volumes." + k + return dyn.MustPathFromString(pathString), r.GetLocations(pathString), true + } + return nil, nil, false +} + +func (v *validateArtifactPath) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics { + // We only validate UC Volumes paths right now. + if !libraries.IsVolumesPath(rb.Config().Workspace.ArtifactPath) { + return nil + } + + wrapErrorMsg := func(s string) diag.Diagnostics { + return diag.Diagnostics{ + { + Summary: s, + Severity: diag.Error, + Locations: rb.Config().GetLocations("workspace.artifact_path"), + Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")}, + }, + } + } + + catalogName, schemaName, volumeName, err := extractVolumeFromPath(rb.Config().Workspace.ArtifactPath) + if err != nil { + return wrapErrorMsg(err.Error()) + } + volumeFullName := fmt.Sprintf("%s.%s.%s", catalogName, schemaName, volumeName) + w := rb.WorkspaceClient() + _, err = w.Volumes.ReadByName(ctx, volumeFullName) + + if errors.Is(err, apierr.ErrPermissionDenied) { + return wrapErrorMsg(fmt.Sprintf("cannot access volume %s: %s", volumeFullName, err)) + } + if errors.Is(err, apierr.ErrNotFound) { + path, locations, ok := findVolumeInBundle(rb.Config(), catalogName, schemaName, volumeName) + if !ok { + return wrapErrorMsg(fmt.Sprintf("volume %s does not exist", volumeFullName)) + } + + // If the volume is defined in the bundle, provide a more helpful error diagnostic, + // with more details and location information. + return diag.Diagnostics{{ + Summary: fmt.Sprintf("volume %s does not exist", volumeFullName), + Severity: diag.Error, + Detail: `You are using a volume in your artifact_path that is managed by +this bundle but which has not been deployed yet. 
Please first deploy +the volume using 'bundle deploy' and then switch over to using it in +the artifact_path.`, + Locations: slices.Concat(rb.Config().GetLocations("workspace.artifact_path"), locations), + Paths: append([]dyn.Path{dyn.MustPathFromString("workspace.artifact_path")}, path), + }} + + } + if err != nil { + return wrapErrorMsg(fmt.Sprintf("cannot read volume %s: %s", volumeFullName, err)) + } + return nil +} diff --git a/bundle/config/validate/validate_artifact_path_test.go b/bundle/config/validate/validate_artifact_path_test.go new file mode 100644 index 000000000..e1ae6af34 --- /dev/null +++ b/bundle/config/validate/validate_artifact_path_test.go @@ -0,0 +1,243 @@ +package validate + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/bundletest" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestValidateArtifactPathWithVolumeInBundle(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + ArtifactPath: "/Volumes/catalogN/schemaN/volumeN/abc", + }, + Resources: config.Resources{ + Volumes: map[string]*resources.Volume{ + "foo": { + CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{ + CatalogName: "catalogN", + Name: "volumeN", + SchemaName: "schemaN", + }, + }, + }, + }, + }, + } + + bundletest.SetLocation(b, "workspace.artifact_path", []dyn.Location{{File: "file", Line: 1, Column: 1}}) + bundletest.SetLocation(b, "resources.volumes.foo", []dyn.Location{{File: "file", Line: 2, Column: 2}}) + + ctx := context.Background() + m := mocks.NewMockWorkspaceClient(t) + api := m.GetMockVolumesAPI() + api.EXPECT().ReadByName(mock.Anything, "catalogN.schemaN.volumeN").Return(nil, &apierr.APIError{ + StatusCode: 404, + }) + b.SetWorkpaceClient(m.WorkspaceClient) + + diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), ValidateArtifactPath()) + assert.Equal(t, diag.Diagnostics{{ + Severity: diag.Error, + Summary: "volume catalogN.schemaN.volumeN does not exist", + Locations: []dyn.Location{ + {File: "file", Line: 1, Column: 1}, + {File: "file", Line: 2, Column: 2}, + }, + Paths: []dyn.Path{ + dyn.MustPathFromString("workspace.artifact_path"), + dyn.MustPathFromString("resources.volumes.foo"), + }, + Detail: `You are using a volume in your artifact_path that is managed by +this bundle but which has not been deployed yet. 
Please first deploy +the volume using 'bundle deploy' and then switch over to using it in +the artifact_path.`, + }}, diags) +} + +func TestValidateArtifactPath(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + ArtifactPath: "/Volumes/catalogN/schemaN/volumeN/abc", + }, + }, + } + + bundletest.SetLocation(b, "workspace.artifact_path", []dyn.Location{{File: "file", Line: 1, Column: 1}}) + assertDiags := func(t *testing.T, diags diag.Diagnostics, expected string) { + assert.Len(t, diags, 1) + assert.Equal(t, diag.Diagnostics{{ + Severity: diag.Error, + Summary: expected, + Locations: []dyn.Location{{File: "file", Line: 1, Column: 1}}, + Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")}, + }}, diags) + } + + rb := bundle.ReadOnly(b) + ctx := context.Background() + + tcases := []struct { + err error + expectedSummary string + }{ + { + err: &apierr.APIError{ + StatusCode: 403, + Message: "User does not have USE SCHEMA on Schema 'catalogN.schemaN'", + }, + expectedSummary: "cannot access volume catalogN.schemaN.volumeN: User does not have USE SCHEMA on Schema 'catalogN.schemaN'", + }, + { + err: &apierr.APIError{ + StatusCode: 404, + }, + expectedSummary: "volume catalogN.schemaN.volumeN does not exist", + }, + { + err: &apierr.APIError{ + StatusCode: 500, + Message: "Internal Server Error", + }, + expectedSummary: "cannot read volume catalogN.schemaN.volumeN: Internal Server Error", + }, + } + + for _, tc := range tcases { + m := mocks.NewMockWorkspaceClient(t) + api := m.GetMockVolumesAPI() + api.EXPECT().ReadByName(mock.Anything, "catalogN.schemaN.volumeN").Return(nil, tc.err) + b.SetWorkpaceClient(m.WorkspaceClient) + + diags := bundle.ApplyReadOnly(ctx, rb, ValidateArtifactPath()) + assertDiags(t, diags, tc.expectedSummary) + } +} + +func invalidVolumePaths() []string { + return []string{ + "/Volumes/", + "/Volumes/main", + "/Volumes/main/", + "/Volumes/main//", + "/Volumes/main//my_schema", + "/Volumes/main/my_schema", + "/Volumes/main/my_schema/", + "/Volumes/main/my_schema//", + "/Volumes//my_schema/my_volume", + } +} + +func TestExtractVolumeFromPath(t *testing.T) { + catalogName, schemaName, volumeName, err := extractVolumeFromPath("/Volumes/main/my_schema/my_volume") + require.NoError(t, err) + assert.Equal(t, "main", catalogName) + assert.Equal(t, "my_schema", schemaName) + assert.Equal(t, "my_volume", volumeName) + + for _, p := range invalidVolumePaths() { + _, _, _, err := extractVolumeFromPath(p) + assert.EqualError(t, err, "expected UC volume path to be in the format /Volumes////..., got "+p) + } +} + +func TestValidateArtifactPathWithInvalidPaths(t *testing.T) { + for _, p := range invalidVolumePaths() { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + ArtifactPath: p, + }, + }, + } + + bundletest.SetLocation(b, "workspace.artifact_path", []dyn.Location{{File: "config.yml", Line: 1, Column: 2}}) + + diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), ValidateArtifactPath()) + require.Equal(t, diag.Diagnostics{{ + Severity: diag.Error, + Summary: "expected UC volume path to be in the format /Volumes////..., got " + p, + Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}}, + Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")}, + }}, diags) + } +} + +func TestFindVolumeInBundle(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Volumes: map[string]*resources.Volume{ + "foo": { + 
CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{ + CatalogName: "main", + Name: "my_volume", + SchemaName: "my_schema", + }, + }, + }, + }, + }, + } + + bundletest.SetLocation(b, "resources.volumes.foo", []dyn.Location{ + { + File: "volume.yml", + Line: 1, + Column: 2, + }, + }) + + // volume is in DAB. + path, locations, ok := findVolumeInBundle(b.Config, "main", "my_schema", "my_volume") + assert.True(t, ok) + assert.Equal(t, []dyn.Location{{ + File: "volume.yml", + Line: 1, + Column: 2, + }}, locations) + assert.Equal(t, dyn.MustPathFromString("resources.volumes.foo"), path) + + // wrong volume name + _, _, ok = findVolumeInBundle(b.Config, "main", "my_schema", "doesnotexist") + assert.False(t, ok) + + // wrong schema name + _, _, ok = findVolumeInBundle(b.Config, "main", "doesnotexist", "my_volume") + assert.False(t, ok) + + // wrong catalog name + _, _, ok = findVolumeInBundle(b.Config, "doesnotexist", "my_schema", "my_volume") + assert.False(t, ok) + + // schema name is interpolated but does not have the right prefix. In this case + // we should not match the volume. + b.Config.Resources.Volumes["foo"].SchemaName = "${foo.bar.baz}" + _, _, ok = findVolumeInBundle(b.Config, "main", "my_schema", "my_volume") + assert.False(t, ok) + + // schema name is interpolated. + b.Config.Resources.Volumes["foo"].SchemaName = "${resources.schemas.my_schema.name}" + path, locations, ok = findVolumeInBundle(b.Config, "main", "valuedoesnotmatter", "my_volume") + assert.True(t, ok) + assert.Equal(t, []dyn.Location{{ + File: "volume.yml", + Line: 1, + Column: 2, + }}, locations) + assert.Equal(t, dyn.MustPathFromString("resources.volumes.foo"), path) +} diff --git a/bundle/config/validate/validate_sync_patterns.go b/bundle/config/validate/validate_sync_patterns.go index 52f06835c..f5787a81d 100644 --- a/bundle/config/validate/validate_sync_patterns.go +++ b/bundle/config/validate/validate_sync_patterns.go @@ -17,8 +17,7 @@ func ValidateSyncPatterns() bundle.ReadOnlyMutator { return &validateSyncPatterns{} } -type validateSyncPatterns struct { -} +type validateSyncPatterns struct{} func (v *validateSyncPatterns) Name() string { return "validate:validate_sync_patterns" diff --git a/bundle/config/variable/lookup.go b/bundle/config/variable/lookup.go index 37e380f18..71c8512e3 100755 --- a/bundle/config/variable/lookup.go +++ b/bundle/config/variable/lookup.go @@ -2,7 +2,7 @@ package variable import ( "context" - "fmt" + "errors" "github.com/databricks/databricks-sdk-go" ) @@ -83,11 +83,11 @@ func (l *Lookup) constructResolver() (resolver, error) { switch len(resolvers) { case 0: - return nil, fmt.Errorf("no valid lookup fields provided") + return nil, errors.New("no valid lookup fields provided") case 1: return resolvers[0], nil default: - return nil, fmt.Errorf("exactly one lookup field must be provided") + return nil, errors.New("exactly one lookup field must be provided") } } diff --git a/bundle/config/variable/lookup_test.go b/bundle/config/variable/lookup_test.go index a84748751..bcfcb4626 100644 --- a/bundle/config/variable/lookup_test.go +++ b/bundle/config/variable/lookup_test.go @@ -13,7 +13,7 @@ func TestLookup_Coverage(t *testing.T) { val := reflect.ValueOf(lookup) typ := val.Type() - for i := 0; i < val.NumField(); i++ { + for i := range val.NumField() { field := val.Field(i) if field.Kind() != reflect.String { t.Fatalf("Field %s is not a string", typ.Field(i).Name) @@ -42,7 +42,6 @@ func TestLookup_Empty(t *testing.T) { // No string representation for an invalid lookup 
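The `lookup.go` and `resolve_*.go` hunks around here, like the resource URL hunks earlier, all apply the same mechanical linter-driven rewrites: `fmt.Errorf` with a constant message becomes `errors.New`, a lone `%s` becomes plain string concatenation, and `fmt.Sprint` on an integer ID becomes `strconv.FormatInt`. A tiny illustration of each form (values are made up; the messages are taken from the diff):

```go
package main

import (
	"errors"
	"fmt"
	"strconv"
)

func main() {
	name := "my-warehouse"
	var jobID int64 = 123456789

	// Constant message: errors.New instead of fmt.Errorf, since nothing is formatted.
	err := errors.New("exactly one lookup field must be provided")

	// A single %s with no other verbs: plain concatenation is enough.
	label := "warehouse: " + name

	// Integer IDs: strconv.FormatInt is explicit about the base and avoids
	// the reflection-based fmt.Sprint path.
	id := strconv.FormatInt(jobID, 10)

	fmt.Println(err, label, id)
}
```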
assert.Empty(t, lookup.String()) - } func TestLookup_Multiple(t *testing.T) { diff --git a/bundle/config/variable/resolve_alert.go b/bundle/config/variable/resolve_alert.go index be83e81fa..507306aa0 100644 --- a/bundle/config/variable/resolve_alert.go +++ b/bundle/config/variable/resolve_alert.go @@ -2,7 +2,6 @@ package variable import ( "context" - "fmt" "github.com/databricks/databricks-sdk-go" ) @@ -16,9 +15,9 @@ func (l resolveAlert) Resolve(ctx context.Context, w *databricks.WorkspaceClient if err != nil { return "", err } - return fmt.Sprint(entity.Id), nil + return entity.Id, nil } func (l resolveAlert) String() string { - return fmt.Sprintf("alert: %s", l.name) + return "alert: " + l.name } diff --git a/bundle/config/variable/resolve_cluster.go b/bundle/config/variable/resolve_cluster.go index 2d68b7fb7..51278aef5 100644 --- a/bundle/config/variable/resolve_cluster.go +++ b/bundle/config/variable/resolve_cluster.go @@ -20,7 +20,6 @@ func (l resolveCluster) Resolve(ctx context.Context, w *databricks.WorkspaceClie ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi}, }, }) - if err != nil { return "", err } @@ -43,5 +42,5 @@ func (l resolveCluster) Resolve(ctx context.Context, w *databricks.WorkspaceClie } func (l resolveCluster) String() string { - return fmt.Sprintf("cluster: %s", l.name) + return "cluster: " + l.name } diff --git a/bundle/config/variable/resolve_cluster_policy.go b/bundle/config/variable/resolve_cluster_policy.go index b19380a63..94fd892b2 100644 --- a/bundle/config/variable/resolve_cluster_policy.go +++ b/bundle/config/variable/resolve_cluster_policy.go @@ -2,7 +2,6 @@ package variable import ( "context" - "fmt" "github.com/databricks/databricks-sdk-go" ) @@ -16,9 +15,9 @@ func (l resolveClusterPolicy) Resolve(ctx context.Context, w *databricks.Workspa if err != nil { return "", err } - return fmt.Sprint(entity.PolicyId), nil + return entity.PolicyId, nil } func (l resolveClusterPolicy) String() string { - return fmt.Sprintf("cluster-policy: %s", l.name) + return "cluster-policy: " + l.name } diff --git a/bundle/config/variable/resolve_dashboard.go b/bundle/config/variable/resolve_dashboard.go index 44fd45197..2979716ce 100644 --- a/bundle/config/variable/resolve_dashboard.go +++ b/bundle/config/variable/resolve_dashboard.go @@ -2,7 +2,6 @@ package variable import ( "context" - "fmt" "github.com/databricks/databricks-sdk-go" ) @@ -16,9 +15,9 @@ func (l resolveDashboard) Resolve(ctx context.Context, w *databricks.WorkspaceCl if err != nil { return "", err } - return fmt.Sprint(entity.Id), nil + return entity.Id, nil } func (l resolveDashboard) String() string { - return fmt.Sprintf("dashboard: %s", l.name) + return "dashboard: " + l.name } diff --git a/bundle/config/variable/resolve_instance_pool.go b/bundle/config/variable/resolve_instance_pool.go index cbf0775c9..600b47a50 100644 --- a/bundle/config/variable/resolve_instance_pool.go +++ b/bundle/config/variable/resolve_instance_pool.go @@ -2,7 +2,6 @@ package variable import ( "context" - "fmt" "github.com/databricks/databricks-sdk-go" ) @@ -16,9 +15,9 @@ func (l resolveInstancePool) Resolve(ctx context.Context, w *databricks.Workspac if err != nil { return "", err } - return fmt.Sprint(entity.InstancePoolId), nil + return entity.InstancePoolId, nil } func (l resolveInstancePool) String() string { - return fmt.Sprintf("instance-pool: %s", l.name) + return "instance-pool: " + l.name } diff --git a/bundle/config/variable/resolve_job.go b/bundle/config/variable/resolve_job.go 
index 3def64888..4fe6ae3e7 100644 --- a/bundle/config/variable/resolve_job.go +++ b/bundle/config/variable/resolve_job.go @@ -2,7 +2,7 @@ package variable import ( "context" - "fmt" + "strconv" "github.com/databricks/databricks-sdk-go" ) @@ -16,9 +16,9 @@ func (l resolveJob) Resolve(ctx context.Context, w *databricks.WorkspaceClient) if err != nil { return "", err } - return fmt.Sprint(entity.JobId), nil + return strconv.FormatInt(entity.JobId, 10), nil } func (l resolveJob) String() string { - return fmt.Sprintf("job: %s", l.name) + return "job: " + l.name } diff --git a/bundle/config/variable/resolve_metastore.go b/bundle/config/variable/resolve_metastore.go index 958e43787..8a0a8c7ed 100644 --- a/bundle/config/variable/resolve_metastore.go +++ b/bundle/config/variable/resolve_metastore.go @@ -2,7 +2,6 @@ package variable import ( "context" - "fmt" "github.com/databricks/databricks-sdk-go" ) @@ -16,9 +15,9 @@ func (l resolveMetastore) Resolve(ctx context.Context, w *databricks.WorkspaceCl if err != nil { return "", err } - return fmt.Sprint(entity.MetastoreId), nil + return entity.MetastoreId, nil } func (l resolveMetastore) String() string { - return fmt.Sprintf("metastore: %s", l.name) + return "metastore: " + l.name } diff --git a/bundle/config/variable/resolve_notification_destination.go b/bundle/config/variable/resolve_notification_destination.go index 4c4cd892a..4696a52c8 100644 --- a/bundle/config/variable/resolve_notification_destination.go +++ b/bundle/config/variable/resolve_notification_destination.go @@ -42,5 +42,5 @@ func (l resolveNotificationDestination) Resolve(ctx context.Context, w *databric } func (l resolveNotificationDestination) String() string { - return fmt.Sprintf("notification-destination: %s", l.name) + return "notification-destination: " + l.name } diff --git a/bundle/config/variable/resolve_notification_destination_test.go b/bundle/config/variable/resolve_notification_destination_test.go index 2b8201d15..f44b2f3e9 100644 --- a/bundle/config/variable/resolve_notification_destination_test.go +++ b/bundle/config/variable/resolve_notification_destination_test.go @@ -2,7 +2,7 @@ package variable import ( "context" - "fmt" + "errors" "testing" "github.com/databricks/databricks-sdk-go/experimental/mocks" @@ -35,7 +35,7 @@ func TestResolveNotificationDestination_ResolveError(t *testing.T) { api := m.GetMockNotificationDestinationsAPI() api.EXPECT(). ListAll(mock.Anything, mock.Anything). 
- Return(nil, fmt.Errorf("bad")) + Return(nil, errors.New("bad")) ctx := context.Background() l := resolveNotificationDestination{name: "destination"} diff --git a/bundle/config/variable/resolve_pipeline.go b/bundle/config/variable/resolve_pipeline.go index cabc620da..33b14530d 100644 --- a/bundle/config/variable/resolve_pipeline.go +++ b/bundle/config/variable/resolve_pipeline.go @@ -2,7 +2,6 @@ package variable import ( "context" - "fmt" "github.com/databricks/databricks-sdk-go" ) @@ -16,9 +15,9 @@ func (l resolvePipeline) Resolve(ctx context.Context, w *databricks.WorkspaceCli if err != nil { return "", err } - return fmt.Sprint(entity.PipelineId), nil + return entity.PipelineId, nil } func (l resolvePipeline) String() string { - return fmt.Sprintf("pipeline: %s", l.name) + return "pipeline: " + l.name } diff --git a/bundle/config/variable/resolve_query.go b/bundle/config/variable/resolve_query.go index 602ff8deb..88f653dc6 100644 --- a/bundle/config/variable/resolve_query.go +++ b/bundle/config/variable/resolve_query.go @@ -2,7 +2,6 @@ package variable import ( "context" - "fmt" "github.com/databricks/databricks-sdk-go" ) @@ -16,9 +15,9 @@ func (l resolveQuery) Resolve(ctx context.Context, w *databricks.WorkspaceClient if err != nil { return "", err } - return fmt.Sprint(entity.Id), nil + return entity.Id, nil } func (l resolveQuery) String() string { - return fmt.Sprintf("query: %s", l.name) + return "query: " + l.name } diff --git a/bundle/config/variable/resolve_service_principal.go b/bundle/config/variable/resolve_service_principal.go index 3bea4314b..03b8e3089 100644 --- a/bundle/config/variable/resolve_service_principal.go +++ b/bundle/config/variable/resolve_service_principal.go @@ -2,7 +2,6 @@ package variable import ( "context" - "fmt" "github.com/databricks/databricks-sdk-go" ) @@ -16,9 +15,9 @@ func (l resolveServicePrincipal) Resolve(ctx context.Context, w *databricks.Work if err != nil { return "", err } - return fmt.Sprint(entity.ApplicationId), nil + return entity.ApplicationId, nil } func (l resolveServicePrincipal) String() string { - return fmt.Sprintf("service-principal: %s", l.name) + return "service-principal: " + l.name } diff --git a/bundle/config/variable/resolve_warehouse.go b/bundle/config/variable/resolve_warehouse.go index fbd3663a2..cabdb1160 100644 --- a/bundle/config/variable/resolve_warehouse.go +++ b/bundle/config/variable/resolve_warehouse.go @@ -2,7 +2,6 @@ package variable import ( "context" - "fmt" "github.com/databricks/databricks-sdk-go" ) @@ -16,9 +15,9 @@ func (l resolveWarehouse) Resolve(ctx context.Context, w *databricks.WorkspaceCl if err != nil { return "", err } - return fmt.Sprint(entity.Id), nil + return entity.Id, nil } func (l resolveWarehouse) String() string { - return fmt.Sprintf("warehouse: %s", l.name) + return "warehouse: " + l.name } diff --git a/bundle/config/variable/variable.go b/bundle/config/variable/variable.go index 2362ad10d..95a68cfeb 100644 --- a/bundle/config/variable/variable.go +++ b/bundle/config/variable/variable.go @@ -1,6 +1,7 @@ package variable import ( + "errors" "fmt" "reflect" ) @@ -68,7 +69,7 @@ func (v *Variable) Set(val VariableValue) error { switch rv.Kind() { case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map: if v.Type != VariableTypeComplex { - return fmt.Errorf("variable type is not complex") + return errors.New("variable type is not complex") } } diff --git a/bundle/config/workspace_test.go b/bundle/config/workspace_test.go index 3ef963253..384cc0a2c 100644 --- 
a/bundle/config/workspace_test.go +++ b/bundle/config/workspace_test.go @@ -11,6 +11,7 @@ import ( "github.com/databricks/cli/libs/databrickscfg" "github.com/databricks/databricks-sdk-go/config" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func setupWorkspaceTest(t *testing.T) string { @@ -42,11 +43,12 @@ func TestWorkspaceResolveProfileFromHost(t *testing.T) { setupWorkspaceTest(t) // This works if there is a config file with a matching profile. - databrickscfg.SaveToProfile(context.Background(), &config.Config{ + err := databrickscfg.SaveToProfile(context.Background(), &config.Config{ Profile: "default", Host: "https://abc.cloud.databricks.com", Token: "123", }) + require.NoError(t, err) client, err := w.Client() assert.NoError(t, err) @@ -57,12 +59,13 @@ func TestWorkspaceResolveProfileFromHost(t *testing.T) { home := setupWorkspaceTest(t) // This works if there is a config file with a matching profile. - databrickscfg.SaveToProfile(context.Background(), &config.Config{ + err := databrickscfg.SaveToProfile(context.Background(), &config.Config{ ConfigFile: filepath.Join(home, "customcfg"), Profile: "custom", Host: "https://abc.cloud.databricks.com", Token: "123", }) + require.NoError(t, err) t.Setenv("DATABRICKS_CONFIG_FILE", filepath.Join(home, "customcfg")) client, err := w.Client() @@ -90,12 +93,13 @@ func TestWorkspaceVerifyProfileForHost(t *testing.T) { setupWorkspaceTest(t) // This works if there is a config file with a matching profile. - databrickscfg.SaveToProfile(context.Background(), &config.Config{ + err := databrickscfg.SaveToProfile(context.Background(), &config.Config{ Profile: "abc", Host: "https://abc.cloud.databricks.com", }) + require.NoError(t, err) - _, err := w.Client() + _, err = w.Client() assert.NoError(t, err) }) @@ -103,12 +107,13 @@ func TestWorkspaceVerifyProfileForHost(t *testing.T) { setupWorkspaceTest(t) // This works if there is a config file with a matching profile. - databrickscfg.SaveToProfile(context.Background(), &config.Config{ + err := databrickscfg.SaveToProfile(context.Background(), &config.Config{ Profile: "abc", Host: "https://def.cloud.databricks.com", }) + require.NoError(t, err) - _, err := w.Client() + _, err = w.Client() assert.ErrorContains(t, err, "config host mismatch") }) @@ -116,14 +121,15 @@ func TestWorkspaceVerifyProfileForHost(t *testing.T) { home := setupWorkspaceTest(t) // This works if there is a config file with a matching profile. - databrickscfg.SaveToProfile(context.Background(), &config.Config{ + err := databrickscfg.SaveToProfile(context.Background(), &config.Config{ ConfigFile: filepath.Join(home, "customcfg"), Profile: "abc", Host: "https://abc.cloud.databricks.com", }) + require.NoError(t, err) t.Setenv("DATABRICKS_CONFIG_FILE", filepath.Join(home, "customcfg")) - _, err := w.Client() + _, err = w.Client() assert.NoError(t, err) }) @@ -131,14 +137,15 @@ func TestWorkspaceVerifyProfileForHost(t *testing.T) { home := setupWorkspaceTest(t) // This works if there is a config file with a matching profile. 
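The workspace test edits above follow the errcheck pattern: errors that used to be dropped are now captured and asserted, which in turn forces the later `w.Client()` call to switch from `:=` to `=`. A minimal sketch of that pattern with stand-in helpers (`saveProfile` and `newClient` are hypothetical, not repository code):

```go
package example_test

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

// Stand-ins for databrickscfg.SaveToProfile and w.Client() in the hunks above.
func saveProfile() error      { return nil }
func newClient() (int, error) { return 0, errors.New("config host mismatch") }

func TestErrcheckPattern(t *testing.T) {
	// Before: the error from saveProfile() was silently discarded.
	// After: it is captured and asserted, satisfying errcheck.
	err := saveProfile()
	require.NoError(t, err)

	// `err` already exists, so the later call must use `=` instead of `:=`;
	// `_, err := newClient()` here would fail to compile with
	// "no new variables on left side of :=".
	_, err = newClient()
	require.Error(t, err)
}
```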
- databrickscfg.SaveToProfile(context.Background(), &config.Config{ + err := databrickscfg.SaveToProfile(context.Background(), &config.Config{ ConfigFile: filepath.Join(home, "customcfg"), Profile: "abc", Host: "https://def.cloud.databricks.com", }) + require.NoError(t, err) t.Setenv("DATABRICKS_CONFIG_FILE", filepath.Join(home, "customcfg")) - _, err := w.Client() + _, err = w.Client() assert.ErrorContains(t, err, "config host mismatch") }) } diff --git a/bundle/context_test.go b/bundle/context_test.go index 3a0f159d9..89a6df052 100644 --- a/bundle/context_test.go +++ b/bundle/context_test.go @@ -12,7 +12,7 @@ func TestGetPanics(t *testing.T) { defer func() { r := recover() require.NotNil(t, r, "The function did not panic") - assert.Equal(t, r, "context not configured with bundle") + assert.Equal(t, "context not configured with bundle", r) }() Get(context.Background()) diff --git a/bundle/deferred.go b/bundle/deferred.go index 56c2bdca2..e7e0c2aeb 100644 --- a/bundle/deferred.go +++ b/bundle/deferred.go @@ -15,7 +15,7 @@ func (d *DeferredMutator) Name() string { return "deferred" } -func Defer(mutator Mutator, finally Mutator) Mutator { +func Defer(mutator, finally Mutator) Mutator { return &DeferredMutator{ mutator: mutator, finally: finally, diff --git a/bundle/deferred_test.go b/bundle/deferred_test.go index 3abc4aa10..ea3df17c4 100644 --- a/bundle/deferred_test.go +++ b/bundle/deferred_test.go @@ -19,7 +19,7 @@ func (t *mutatorWithError) Name() string { func (t *mutatorWithError) Apply(_ context.Context, b *Bundle) diag.Diagnostics { t.applyCalled++ - return diag.Errorf(t.errorMsg) + return diag.Errorf(t.errorMsg) // nolint:govet } func TestDeferredMutatorWhenAllMutatorsSucceed(t *testing.T) { diff --git a/bundle/deploy/metadata/compute.go b/bundle/deploy/metadata/compute.go index bc8767de4..b47baa6b2 100644 --- a/bundle/deploy/metadata/compute.go +++ b/bundle/deploy/metadata/compute.go @@ -54,5 +54,8 @@ func (m *compute) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { // Set file upload destination of the bundle in metadata b.Metadata.Config.Workspace.FilePath = b.Config.Workspace.FilePath + if config.IsExplicitlyEnabled(b.Config.Presets.SourceLinkedDeployment) { + b.Metadata.Config.Workspace.FilePath = b.SyncRootPath + } return nil } diff --git a/bundle/deploy/metadata/compute_test.go b/bundle/deploy/metadata/compute_test.go index 2c2c72376..c6fa9bddb 100644 --- a/bundle/deploy/metadata/compute_test.go +++ b/bundle/deploy/metadata/compute_test.go @@ -97,3 +97,24 @@ func TestComputeMetadataMutator(t *testing.T) { assert.Equal(t, expectedMetadata, b.Metadata) } + +func TestComputeMetadataMutatorSourceLinked(t *testing.T) { + syncRootPath := "/Users/shreyas.goenka@databricks.com/source" + enabled := true + b := &bundle.Bundle{ + SyncRootPath: syncRootPath, + Config: config.Root{ + Presets: config.Presets{ + SourceLinkedDeployment: &enabled, + }, + Workspace: config.Workspace{ + FilePath: "/Users/shreyas.goenka@databricks.com/files", + }, + }, + } + + diags := bundle.Apply(context.Background(), b, Compute()) + require.NoError(t, diags.Error()) + + assert.Equal(t, syncRootPath, b.Metadata.Config.Workspace.FilePath) +} diff --git a/bundle/deploy/state.go b/bundle/deploy/state.go index 4f2bc4ee4..6e285034a 100644 --- a/bundle/deploy/state.go +++ b/bundle/deploy/state.go @@ -3,6 +3,7 @@ package deploy import ( "context" "encoding/json" + "errors" "fmt" "io" "io/fs" @@ -15,8 +16,10 @@ import ( "github.com/google/uuid" ) -const DeploymentStateFileName = "deployment.json" -const 
DeploymentStateVersion = 1 +const ( + DeploymentStateFileName = "deployment.json" + DeploymentStateVersion = 1 +) type File struct { LocalPath string `json:"local_path"` @@ -93,7 +96,7 @@ func (e *entry) Type() fs.FileMode { func (e *entry) Info() (fs.FileInfo, error) { if e.info == nil { - return nil, fmt.Errorf("no info available") + return nil, errors.New("no info available") } return e.info, nil } @@ -132,7 +135,7 @@ func (f Filelist) ToSlice(root vfs.Path) []fileset.File { return files } -func isLocalStateStale(local io.Reader, remote io.Reader) bool { +func isLocalStateStale(local, remote io.Reader) bool { localState, err := loadState(local) if err != nil { return true diff --git a/bundle/deploy/state_pull.go b/bundle/deploy/state_pull.go index 5e301a6f3..8fffca073 100644 --- a/bundle/deploy/state_pull.go +++ b/bundle/deploy/state_pull.go @@ -44,7 +44,7 @@ func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic return diag.FromErr(err) } - local, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR, 0600) + local, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR, 0o600) if err != nil { return diag.FromErr(err) } @@ -62,8 +62,14 @@ func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic } // Truncating the file before writing - local.Truncate(0) - local.Seek(0, 0) + err = local.Truncate(0) + if err != nil { + return diag.FromErr(err) + } + _, err = local.Seek(0, 0) + if err != nil { + return diag.FromErr(err) + } // Write file to disk. log.Infof(ctx, "Writing remote deployment state file to local cache directory") diff --git a/bundle/deploy/state_pull_test.go b/bundle/deploy/state_pull_test.go index 42701eb26..f38b71f6b 100644 --- a/bundle/deploy/state_pull_test.go +++ b/bundle/deploy/state_pull_test.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "encoding/json" - "errors" "io" "io/fs" "os" @@ -99,7 +98,7 @@ func testStatePull(t *testing.T, opts statePullOpts) { snapshotPath, err := sync.SnapshotPath(opts) require.NoError(t, err) - err = os.WriteFile(snapshotPath, []byte("snapshot"), 0644) + err = os.WriteFile(snapshotPath, []byte("snapshot"), 0o644) require.NoError(t, err) } @@ -110,7 +109,7 @@ func testStatePull(t *testing.T, opts statePullOpts) { data, err := json.Marshal(opts.localState) require.NoError(t, err) - err = os.WriteFile(statePath, data, 0644) + err = os.WriteFile(statePath, data, 0o644) require.NoError(t, err) } @@ -279,7 +278,7 @@ func TestStatePullNoState(t *testing.T) { require.NoError(t, err) _, err = os.Stat(statePath) - require.True(t, errors.Is(err, fs.ErrNotExist)) + require.ErrorIs(t, err, fs.ErrNotExist) } func TestStatePullOlderState(t *testing.T) { diff --git a/bundle/deploy/state_push_test.go b/bundle/deploy/state_push_test.go index 038b75341..3562ec147 100644 --- a/bundle/deploy/state_push_test.go +++ b/bundle/deploy/state_push_test.go @@ -74,7 +74,7 @@ func TestStatePush(t *testing.T) { data, err := json.Marshal(state) require.NoError(t, err) - err = os.WriteFile(statePath, data, 0644) + err = os.WriteFile(statePath, data, 0o644) require.NoError(t, err) diags := bundle.Apply(ctx, b, s) diff --git a/bundle/deploy/state_update.go b/bundle/deploy/state_update.go index 9ab1bacf1..5488d50ed 100644 --- a/bundle/deploy/state_update.go +++ b/bundle/deploy/state_update.go @@ -17,8 +17,7 @@ import ( "github.com/google/uuid" ) -type stateUpdate struct { -} +type stateUpdate struct{} func (s *stateUpdate) Name() string { return "deploy:state-update" @@ -57,7 +56,7 @@ func (s *stateUpdate) Apply(ctx context.Context, 
b *bundle.Bundle) diag.Diagnost return diag.FromErr(err) } // Write the state back to the file. - f, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0600) + f, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o600) if err != nil { log.Infof(ctx, "Unable to open deployment state file: %s", err) return diag.FromErr(err) diff --git a/bundle/deploy/state_update_test.go b/bundle/deploy/state_update_test.go index 1f5010b52..04c5579a8 100644 --- a/bundle/deploy/state_update_test.go +++ b/bundle/deploy/state_update_test.go @@ -60,7 +60,7 @@ func TestStateUpdate(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(1), state.Seq) - require.Equal(t, state.Files, Filelist{ + require.Equal(t, Filelist{ { LocalPath: "test1.py", }, @@ -68,7 +68,7 @@ func TestStateUpdate(t *testing.T) { LocalPath: "test2.py", IsNotebook: true, }, - }) + }, state.Files) require.Equal(t, build.GetInfo().Version, state.CliVersion) diags = bundle.Apply(ctx, b, s) @@ -79,7 +79,7 @@ func TestStateUpdate(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(2), state.Seq) - require.Equal(t, state.Files, Filelist{ + require.Equal(t, Filelist{ { LocalPath: "test1.py", }, @@ -87,7 +87,7 @@ func TestStateUpdate(t *testing.T) { LocalPath: "test2.py", IsNotebook: true, }, - }) + }, state.Files) require.Equal(t, build.GetInfo().Version, state.CliVersion) // Valid non-empty UUID is generated. @@ -119,7 +119,7 @@ func TestStateUpdateWithExistingState(t *testing.T) { data, err := json.Marshal(state) require.NoError(t, err) - err = os.WriteFile(statePath, data, 0644) + err = os.WriteFile(statePath, data, 0o644) require.NoError(t, err) diags := bundle.Apply(ctx, b, s) @@ -130,7 +130,7 @@ func TestStateUpdateWithExistingState(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(11), state.Seq) - require.Equal(t, state.Files, Filelist{ + require.Equal(t, Filelist{ { LocalPath: "test1.py", }, @@ -138,7 +138,7 @@ func TestStateUpdateWithExistingState(t *testing.T) { LocalPath: "test2.py", IsNotebook: true, }, - }) + }, state.Files) require.Equal(t, build.GetInfo().Version, state.CliVersion) // Existing UUID is not overwritten. diff --git a/bundle/deploy/terraform/check_dashboards_modified_remotely.go b/bundle/deploy/terraform/check_dashboards_modified_remotely.go index c884bcb9b..66914af54 100644 --- a/bundle/deploy/terraform/check_dashboards_modified_remotely.go +++ b/bundle/deploy/terraform/check_dashboards_modified_remotely.go @@ -42,8 +42,7 @@ func collectDashboardsFromState(ctx context.Context, b *bundle.Bundle) ([]dashbo return dashboards, nil } -type checkDashboardsModifiedRemotely struct { -} +type checkDashboardsModifiedRemotely struct{} func (l *checkDashboardsModifiedRemotely) Name() string { return "CheckDashboardsModifiedRemotely" @@ -73,7 +72,7 @@ func (l *checkDashboardsModifiedRemotely) Apply(ctx context.Context, b *bundle.B continue } - path := dyn.MustPathFromString(fmt.Sprintf("resources.dashboards.%s", dashboard.Name)) + path := dyn.MustPathFromString("resources.dashboards." 
+ dashboard.Name) loc := b.Config.GetLocation(path.String()) actual, err := b.WorkspaceClient().Lakeview.GetByDashboardId(ctx, dashboard.ID) if err != nil { diff --git a/bundle/deploy/terraform/check_dashboards_modified_remotely_test.go b/bundle/deploy/terraform/check_dashboards_modified_remotely_test.go index 25aee125f..46bdc1f38 100644 --- a/bundle/deploy/terraform/check_dashboards_modified_remotely_test.go +++ b/bundle/deploy/terraform/check_dashboards_modified_remotely_test.go @@ -2,7 +2,7 @@ package terraform import ( "context" - "fmt" + "errors" "path/filepath" "testing" @@ -122,7 +122,7 @@ func TestCheckDashboardsModifiedRemotely_ExistingStateFailureToGet(t *testing.T) dashboardsAPI := m.GetMockLakeviewAPI() dashboardsAPI.EXPECT(). GetByDashboardId(mock.Anything, "id1"). - Return(nil, fmt.Errorf("failure")). + Return(nil, errors.New("failure")). Once() b.SetWorkpaceClient(m.WorkspaceClient) @@ -139,7 +139,7 @@ func writeFakeDashboardState(t *testing.T, ctx context.Context, b *bundle.Bundle require.NoError(t, err) // Write fake state file. - testutil.WriteFile(t, ` + testutil.WriteFile(t, filepath.Join(tfDir, TerraformStateFileName), ` { "version": 4, "terraform_version": "1.5.5", @@ -187,5 +187,5 @@ func writeFakeDashboardState(t *testing.T, ctx context.Context, b *bundle.Bundle } ] } - `, filepath.Join(tfDir, TerraformStateFileName)) + `) } diff --git a/bundle/deploy/terraform/check_running_resources.go b/bundle/deploy/terraform/check_running_resources.go index 737f773e5..5b3a70408 100644 --- a/bundle/deploy/terraform/check_running_resources.go +++ b/bundle/deploy/terraform/check_running_resources.go @@ -23,8 +23,7 @@ func (e ErrResourceIsRunning) Error() string { return fmt.Sprintf("%s %s is running", e.resourceType, e.resourceId) } -type checkRunningResources struct { -} +type checkRunningResources struct{} func (l *checkRunningResources) Name() string { return "check-running-resources" diff --git a/bundle/deploy/terraform/convert_test.go b/bundle/deploy/terraform/convert_test.go index 076d9b7a0..ccfdcece3 100644 --- a/bundle/deploy/terraform/convert_test.go +++ b/bundle/deploy/terraform/convert_test.go @@ -43,7 +43,7 @@ func convertToResourceStruct[T any](t *testing.T, resource *T, data any) { } func TestBundleToTerraformJob(t *testing.T) { - var src = resources.Job{ + src := resources.Job{ JobSettings: &jobs.JobSettings{ Name: "my job", JobClusters: []jobs.JobCluster{ @@ -71,7 +71,7 @@ func TestBundleToTerraformJob(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ "my_job": &src, @@ -93,7 +93,7 @@ func TestBundleToTerraformJob(t *testing.T) { } func TestBundleToTerraformJobPermissions(t *testing.T) { - var src = resources.Job{ + src := resources.Job{ Permissions: []resources.Permission{ { Level: "CAN_VIEW", @@ -102,7 +102,7 @@ func TestBundleToTerraformJobPermissions(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ "my_job": &src, @@ -121,7 +121,7 @@ func TestBundleToTerraformJobPermissions(t *testing.T) { } func TestBundleToTerraformJobTaskLibraries(t *testing.T) { - var src = resources.Job{ + src := resources.Job{ JobSettings: &jobs.JobSettings{ Name: "my job", Tasks: []jobs.Task{ @@ -139,7 +139,7 @@ func TestBundleToTerraformJobTaskLibraries(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ "my_job": &src, @@ -158,7 
+158,7 @@ func TestBundleToTerraformJobTaskLibraries(t *testing.T) { } func TestBundleToTerraformForEachTaskLibraries(t *testing.T) { - var src = resources.Job{ + src := resources.Job{ JobSettings: &jobs.JobSettings{ Name: "my job", Tasks: []jobs.Task{ @@ -182,7 +182,7 @@ func TestBundleToTerraformForEachTaskLibraries(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ "my_job": &src, @@ -201,7 +201,7 @@ func TestBundleToTerraformForEachTaskLibraries(t *testing.T) { } func TestBundleToTerraformPipeline(t *testing.T) { - var src = resources.Pipeline{ + src := resources.Pipeline{ PipelineSpec: &pipelines.PipelineSpec{ Name: "my pipeline", Libraries: []pipelines.PipelineLibrary{ @@ -239,7 +239,7 @@ func TestBundleToTerraformPipeline(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ "my_pipeline": &src, @@ -254,15 +254,15 @@ func TestBundleToTerraformPipeline(t *testing.T) { assert.Equal(t, "my pipeline", resource.Name) assert.Len(t, resource.Library, 2) assert.Len(t, resource.Notification, 2) - assert.Equal(t, resource.Notification[0].Alerts, []string{"on-update-fatal-failure"}) - assert.Equal(t, resource.Notification[0].EmailRecipients, []string{"jane@doe.com"}) - assert.Equal(t, resource.Notification[1].Alerts, []string{"on-update-failure", "on-flow-failure"}) - assert.Equal(t, resource.Notification[1].EmailRecipients, []string{"jane@doe.com", "john@doe.com"}) + assert.Equal(t, []string{"on-update-fatal-failure"}, resource.Notification[0].Alerts) + assert.Equal(t, []string{"jane@doe.com"}, resource.Notification[0].EmailRecipients) + assert.Equal(t, []string{"on-update-failure", "on-flow-failure"}, resource.Notification[1].Alerts) + assert.Equal(t, []string{"jane@doe.com", "john@doe.com"}, resource.Notification[1].EmailRecipients) assert.Nil(t, out.Data) } func TestBundleToTerraformPipelinePermissions(t *testing.T) { - var src = resources.Pipeline{ + src := resources.Pipeline{ Permissions: []resources.Permission{ { Level: "CAN_VIEW", @@ -271,7 +271,7 @@ func TestBundleToTerraformPipelinePermissions(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ "my_pipeline": &src, @@ -290,7 +290,7 @@ func TestBundleToTerraformPipelinePermissions(t *testing.T) { } func TestBundleToTerraformModel(t *testing.T) { - var src = resources.MlflowModel{ + src := resources.MlflowModel{ Model: &ml.Model{ Name: "name", Description: "description", @@ -307,7 +307,7 @@ func TestBundleToTerraformModel(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Models: map[string]*resources.MlflowModel{ "my_model": &src, @@ -330,7 +330,7 @@ func TestBundleToTerraformModel(t *testing.T) { } func TestBundleToTerraformModelPermissions(t *testing.T) { - var src = resources.MlflowModel{ + src := resources.MlflowModel{ Model: &ml.Model{ Name: "name", }, @@ -342,7 +342,7 @@ func TestBundleToTerraformModelPermissions(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Models: map[string]*resources.MlflowModel{ "my_model": &src, @@ -361,13 +361,13 @@ func TestBundleToTerraformModelPermissions(t *testing.T) { } func TestBundleToTerraformExperiment(t *testing.T) { - var src = resources.MlflowExperiment{ + src := resources.MlflowExperiment{ Experiment: &ml.Experiment{ 
Name: "name", }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Experiments: map[string]*resources.MlflowExperiment{ "my_experiment": &src, @@ -384,7 +384,7 @@ func TestBundleToTerraformExperiment(t *testing.T) { } func TestBundleToTerraformExperimentPermissions(t *testing.T) { - var src = resources.MlflowExperiment{ + src := resources.MlflowExperiment{ Experiment: &ml.Experiment{ Name: "name", }, @@ -396,7 +396,7 @@ func TestBundleToTerraformExperimentPermissions(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Experiments: map[string]*resources.MlflowExperiment{ "my_experiment": &src, @@ -415,7 +415,7 @@ func TestBundleToTerraformExperimentPermissions(t *testing.T) { } func TestBundleToTerraformModelServing(t *testing.T) { - var src = resources.ModelServingEndpoint{ + src := resources.ModelServingEndpoint{ CreateServingEndpoint: &serving.CreateServingEndpoint{ Name: "name", Config: serving.EndpointCoreConfigInput{ @@ -439,7 +439,7 @@ func TestBundleToTerraformModelServing(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{ "my_model_serving_endpoint": &src, @@ -454,7 +454,7 @@ func TestBundleToTerraformModelServing(t *testing.T) { assert.Equal(t, "name", resource.Name) assert.Equal(t, "model_name", resource.Config.ServedModels[0].ModelName) assert.Equal(t, "1", resource.Config.ServedModels[0].ModelVersion) - assert.Equal(t, true, resource.Config.ServedModels[0].ScaleToZeroEnabled) + assert.True(t, resource.Config.ServedModels[0].ScaleToZeroEnabled) assert.Equal(t, "Small", resource.Config.ServedModels[0].WorkloadSize) assert.Equal(t, "model_name-1", resource.Config.TrafficConfig.Routes[0].ServedModelName) assert.Equal(t, 100, resource.Config.TrafficConfig.Routes[0].TrafficPercentage) @@ -462,7 +462,7 @@ func TestBundleToTerraformModelServing(t *testing.T) { } func TestBundleToTerraformModelServingPermissions(t *testing.T) { - var src = resources.ModelServingEndpoint{ + src := resources.ModelServingEndpoint{ CreateServingEndpoint: &serving.CreateServingEndpoint{ Name: "name", @@ -492,7 +492,7 @@ func TestBundleToTerraformModelServingPermissions(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{ "my_model_serving_endpoint": &src, @@ -511,7 +511,7 @@ func TestBundleToTerraformModelServingPermissions(t *testing.T) { } func TestBundleToTerraformRegisteredModel(t *testing.T) { - var src = resources.RegisteredModel{ + src := resources.RegisteredModel{ CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{ Name: "name", CatalogName: "catalog", @@ -520,7 +520,7 @@ func TestBundleToTerraformRegisteredModel(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ RegisteredModels: map[string]*resources.RegisteredModel{ "my_registered_model": &src, @@ -540,7 +540,7 @@ func TestBundleToTerraformRegisteredModel(t *testing.T) { } func TestBundleToTerraformRegisteredModelGrants(t *testing.T) { - var src = resources.RegisteredModel{ + src := resources.RegisteredModel{ CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{ Name: "name", CatalogName: "catalog", @@ -554,7 +554,7 @@ func TestBundleToTerraformRegisteredModelGrants(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: 
config.Resources{ RegisteredModels: map[string]*resources.RegisteredModel{ "my_registered_model": &src, @@ -573,14 +573,14 @@ func TestBundleToTerraformRegisteredModelGrants(t *testing.T) { } func TestBundleToTerraformDeletedResources(t *testing.T) { - var job1 = resources.Job{ + job1 := resources.Job{ JobSettings: &jobs.JobSettings{}, } - var job2 = resources.Job{ + job2 := resources.Job{ ModifiedStatus: resources.ModifiedStatusDeleted, JobSettings: &jobs.JobSettings{}, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ "my_job1": &job1, @@ -601,10 +601,10 @@ func TestBundleToTerraformDeletedResources(t *testing.T) { } func TestTerraformToBundleEmptyLocalResources(t *testing.T) { - var config = config.Root{ + config := config.Root{ Resources: config.Resources{}, } - var tfState = resourcesState{ + tfState := resourcesState{ Resources: []stateResource{ { Type: "databricks_job", @@ -736,7 +736,7 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) { } func TestTerraformToBundleEmptyRemoteResources(t *testing.T) { - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ "test_job": { @@ -817,7 +817,7 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) { }, }, } - var tfState = resourcesState{ + tfState := resourcesState{ Resources: nil, } err := TerraformToBundle(&tfState, &config) @@ -860,7 +860,7 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) { } func TestTerraformToBundleModifiedResources(t *testing.T) { - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ "test_job": { @@ -996,7 +996,7 @@ func TestTerraformToBundleModifiedResources(t *testing.T) { }, }, } - var tfState = resourcesState{ + tfState := resourcesState{ Resources: []stateResource{ { Type: "databricks_job", @@ -1261,7 +1261,7 @@ func TestTerraformToBundleModifiedResources(t *testing.T) { func AssertFullResourceCoverage(t *testing.T, config *config.Root) { resources := reflect.ValueOf(config.Resources) - for i := 0; i < resources.NumField(); i++ { + for i := range resources.NumField() { field := resources.Field(i) if field.Kind() == reflect.Map { assert.True( diff --git a/bundle/deploy/terraform/import.go b/bundle/deploy/terraform/import.go index 0a1d1b9ce..a0604e71d 100644 --- a/bundle/deploy/terraform/import.go +++ b/bundle/deploy/terraform/import.go @@ -7,6 +7,7 @@ import ( "io" "os" "path/filepath" + "strings" "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/cmdio" @@ -67,7 +68,7 @@ func (m *importResource) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn if changed && !m.opts.AutoApprove { output := buf.String() // Remove output starting from Warning until end of output - output = output[:bytes.Index([]byte(output), []byte("Warning:"))] + output = output[:strings.Index(output, "Warning:")] cmdio.LogString(ctx, output) if !cmdio.IsPromptSupported(ctx) { diff --git a/bundle/deploy/terraform/init.go b/bundle/deploy/terraform/init.go index 7d75ee8a8..d982354e1 100644 --- a/bundle/deploy/terraform/init.go +++ b/bundle/deploy/terraform/init.go @@ -145,7 +145,7 @@ func inheritEnvVars(ctx context.Context, environ map[string]string) error { // This function is used for env vars set by the Databricks VSCode extension. 
The variables are intended to be used by the CLI // bundled with the Databricks VSCode extension, but users can use different CLI versions in the VSCode terminals, in which case we want to ignore // the variables if that CLI uses different versions of the dependencies. -func getEnvVarWithMatchingVersion(ctx context.Context, envVarName string, versionVarName string, currentVersion string) (string, error) { +func getEnvVarWithMatchingVersion(ctx context.Context, envVarName, versionVarName, currentVersion string) (string, error) { envValue := env.Get(ctx, envVarName) versionValue := env.Get(ctx, versionVarName) @@ -230,9 +230,13 @@ func setUserAgentExtraEnvVar(environ map[string]string, b *bundle.Bundle) error // Add "cli" to the user agent in set by the Databricks Terraform provider. // This will allow us to attribute downstream requests made by the Databricks // Terraform provider to the CLI. - products := []string{fmt.Sprintf("cli/%s", build.GetInfo().Version)} + products := []string{"cli/" + build.GetInfo().Version} if experimental := b.Config.Experimental; experimental != nil { - if experimental.PyDABs.Enabled { + hasPython := experimental.Python.Resources != nil || experimental.Python.Mutators != nil + + if hasPython { + products = append(products, "databricks-pydabs/0.7.0") + } else if experimental.PyDABs.Enabled { products = append(products, "databricks-pydabs/0.0.0") } } diff --git a/bundle/deploy/terraform/init_test.go b/bundle/deploy/terraform/init_test.go index e3621c6c3..c7a4ffe4a 100644 --- a/bundle/deploy/terraform/init_test.go +++ b/bundle/deploy/terraform/init_test.go @@ -225,7 +225,7 @@ func TestSetProxyEnvVars(t *testing.T) { env := make(map[string]string, 0) err := setProxyEnvVars(context.Background(), env, b) require.NoError(t, err) - assert.Len(t, env, 0) + assert.Empty(t, env) // Lower case set. 
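The `setUserAgentExtraEnvVar` change above prefers the new `experimental.python` configuration over the legacy `experimental.pydabs` flag when deciding which `databricks-pydabs` product tag to append. A reduced sketch of that branch logic, with the bundle config collapsed to two booleans; the helper below is illustrative only and just returns the string rather than setting the environment variable:

```go
package main

import (
	"fmt"
	"strings"
)

// pythonConfigured stands in for "Resources or Mutators set on experimental.python";
// pyDABsEnabled stands in for the legacy experimental.pydabs.enabled flag.
func userAgentExtra(cliVersion string, pythonConfigured, pyDABsEnabled bool) string {
	products := []string{"cli/" + cliVersion}
	if pythonConfigured {
		products = append(products, "databricks-pydabs/0.7.0")
	} else if pyDABsEnabled {
		products = append(products, "databricks-pydabs/0.0.0")
	}
	return strings.Join(products, " ")
}

func main() {
	fmt.Println(userAgentExtra("0.0.0-dev", true, false))  // cli/0.0.0-dev databricks-pydabs/0.7.0
	fmt.Println(userAgentExtra("0.0.0-dev", false, true))  // cli/0.0.0-dev databricks-pydabs/0.0.0
	fmt.Println(userAgentExtra("0.0.0-dev", false, false)) // cli/0.0.0-dev
}
```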
clearEnv() @@ -248,7 +248,7 @@ func TestSetProxyEnvVars(t *testing.T) { assert.ElementsMatch(t, []string{"HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY"}, maps.Keys(env)) } -func TestSetUserAgentExtraEnvVar(t *testing.T) { +func TestSetUserAgentExtraEnvVar_PyDABs(t *testing.T) { b := &bundle.Bundle{ BundleRootPath: t.TempDir(), Config: config.Root{ @@ -268,6 +268,26 @@ func TestSetUserAgentExtraEnvVar(t *testing.T) { }, env) } +func TestSetUserAgentExtraEnvVar_Python(t *testing.T) { + b := &bundle.Bundle{ + BundleRootPath: t.TempDir(), + Config: config.Root{ + Experimental: &config.Experimental{ + Python: config.Python{ + Resources: []string{"my_project.resources:load_resources"}, + }, + }, + }, + } + + env := make(map[string]string, 0) + err := setUserAgentExtraEnvVar(env, b) + require.NoError(t, err) + assert.Equal(t, map[string]string{ + "DATABRICKS_USER_AGENT_EXTRA": "cli/0.0.0-dev databricks-pydabs/0.7.0", + }, env) +} + func TestInheritEnvVars(t *testing.T) { t.Setenv("HOME", "/home/testuser") t.Setenv("PATH", "/foo:/bar") @@ -293,7 +313,7 @@ func TestSetUserProfileFromInheritEnvVars(t *testing.T) { require.NoError(t, err) assert.Contains(t, env, "USERPROFILE") - assert.Equal(t, env["USERPROFILE"], "c:\\foo\\c") + assert.Equal(t, "c:\\foo\\c", env["USERPROFILE"]) } func TestInheritEnvVarsWithAbsentTFConfigFile(t *testing.T) { @@ -400,7 +420,7 @@ func TestFindExecPathFromEnvironmentWithCorrectVersionAndBinary(t *testing.T) { require.Equal(t, tmpBinPath, b.Config.Bundle.Terraform.ExecPath) } -func createTempFile(t *testing.T, dest string, name string, executable bool) string { +func createTempFile(t *testing.T, dest, name string, executable bool) string { binPath := filepath.Join(dest, name) f, err := os.Create(binPath) require.NoError(t, err) @@ -409,7 +429,7 @@ func createTempFile(t *testing.T, dest string, name string, executable bool) str require.NoError(t, err) }() if executable { - err = f.Chmod(0777) + err = f.Chmod(0o777) require.NoError(t, err) } return binPath @@ -422,7 +442,7 @@ func TestGetEnvVarWithMatchingVersion(t *testing.T) { tmp := t.TempDir() file := testutil.Touch(t, tmp, "bar") - var tc = []struct { + tc := []struct { envValue string versionValue string currentVersion string diff --git a/bundle/deploy/terraform/interpolate.go b/bundle/deploy/terraform/interpolate.go index 9c2126aec..813e6bbb7 100644 --- a/bundle/deploy/terraform/interpolate.go +++ b/bundle/deploy/terraform/interpolate.go @@ -10,8 +10,7 @@ import ( "github.com/databricks/cli/libs/dyn/dynvar" ) -type interpolateMutator struct { -} +type interpolateMutator struct{} func Interpolate() bundle.Mutator { return &interpolateMutator{} diff --git a/bundle/deploy/terraform/load.go b/bundle/deploy/terraform/load.go index 3fb76855e..1c563fa77 100644 --- a/bundle/deploy/terraform/load.go +++ b/bundle/deploy/terraform/load.go @@ -2,6 +2,7 @@ package terraform import ( "context" + "errors" "fmt" "slices" @@ -58,7 +59,7 @@ func (l *load) validateState(state *resourcesState) error { } if len(state.Resources) == 0 && slices.Contains(l.modes, ErrorOnEmptyState) { - return fmt.Errorf("no deployment state. Did you forget to run 'databricks bundle deploy'?") + return errors.New("no deployment state. 
Did you forget to run 'databricks bundle deploy'?") } return nil diff --git a/bundle/deploy/terraform/pkg.go b/bundle/deploy/terraform/pkg.go index cad754024..bd636639f 100644 --- a/bundle/deploy/terraform/pkg.go +++ b/bundle/deploy/terraform/pkg.go @@ -5,15 +5,19 @@ import ( "github.com/hashicorp/go-version" ) -const TerraformStateFileName = "terraform.tfstate" -const TerraformConfigFileName = "bundle.tf.json" +const ( + TerraformStateFileName = "terraform.tfstate" + TerraformConfigFileName = "bundle.tf.json" +) // Users can provide their own terraform binary and databricks terraform provider by setting the following environment variables. // This allows users to use the CLI in an air-gapped environments. See the `debug terraform` command. -const TerraformExecPathEnv = "DATABRICKS_TF_EXEC_PATH" -const TerraformVersionEnv = "DATABRICKS_TF_VERSION" -const TerraformCliConfigPathEnv = "DATABRICKS_TF_CLI_CONFIG_FILE" -const TerraformProviderVersionEnv = "DATABRICKS_TF_PROVIDER_VERSION" +const ( + TerraformExecPathEnv = "DATABRICKS_TF_EXEC_PATH" + TerraformVersionEnv = "DATABRICKS_TF_VERSION" + TerraformCliConfigPathEnv = "DATABRICKS_TF_CLI_CONFIG_FILE" + TerraformProviderVersionEnv = "DATABRICKS_TF_PROVIDER_VERSION" +) // Terraform CLI version to use and the corresponding checksums for it. The // checksums are used to verify the integrity of the downloaded binary. Please @@ -26,8 +30,10 @@ const TerraformProviderVersionEnv = "DATABRICKS_TF_PROVIDER_VERSION" // downloaded Terraform archive. var TerraformVersion = version.Must(version.NewVersion("1.5.5")) -const checksumLinuxArm64 = "b055aefe343d0b710d8a7afd31aeb702b37bbf4493bb9385a709991e48dfbcd2" -const checksumLinuxAmd64 = "ad0c696c870c8525357b5127680cd79c0bdf58179af9acd091d43b1d6482da4a" +const ( + checksumLinuxArm64 = "b055aefe343d0b710d8a7afd31aeb702b37bbf4493bb9385a709991e48dfbcd2" + checksumLinuxAmd64 = "ad0c696c870c8525357b5127680cd79c0bdf58179af9acd091d43b1d6482da4a" +) type Checksum struct { LinuxArm64 string `json:"linux_arm64"` diff --git a/bundle/deploy/terraform/pkg_test.go b/bundle/deploy/terraform/pkg_test.go index b8dcb9e08..08ec3de75 100644 --- a/bundle/deploy/terraform/pkg_test.go +++ b/bundle/deploy/terraform/pkg_test.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/require" ) -func downloadAndChecksum(t *testing.T, url string, expectedChecksum string) { +func downloadAndChecksum(t *testing.T, url, expectedChecksum string) { resp, err := http.Get(url) require.NoError(t, err) defer resp.Body.Close() diff --git a/bundle/deploy/terraform/plan.go b/bundle/deploy/terraform/plan.go index 72f0b49a8..7f7473efa 100644 --- a/bundle/deploy/terraform/plan.go +++ b/bundle/deploy/terraform/plan.go @@ -2,7 +2,6 @@ package terraform import ( "context" - "fmt" "path/filepath" "github.com/databricks/cli/bundle" @@ -57,7 +56,7 @@ func (p *plan) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { IsEmpty: !notEmpty, } - log.Debugf(ctx, fmt.Sprintf("Planning complete and persisted at %s\n", planPath)) + log.Debugf(ctx, "Planning complete and persisted at %s\n", planPath) return nil } diff --git a/bundle/deploy/terraform/state_pull.go b/bundle/deploy/terraform/state_pull.go index 9a5b91007..4e1e2b1c5 100644 --- a/bundle/deploy/terraform/state_pull.go +++ b/bundle/deploy/terraform/state_pull.go @@ -104,7 +104,7 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic localState, err := l.localState(ctx, b) if errors.Is(err, fs.ErrNotExist) { log.Infof(ctx, "Local state file does not exist. 
Using remote Terraform state.") - err := os.WriteFile(localStatePath, remoteContent, 0600) + err := os.WriteFile(localStatePath, remoteContent, 0o600) return diag.FromErr(err) } if err != nil { @@ -114,14 +114,14 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic // If the lineage does not match, the Terraform state files do not correspond to the same deployment. if localState.Lineage != remoteState.Lineage { log.Infof(ctx, "Remote and local state lineages do not match. Using remote Terraform state. Invalidating local Terraform state.") - err := os.WriteFile(localStatePath, remoteContent, 0600) + err := os.WriteFile(localStatePath, remoteContent, 0o600) return diag.FromErr(err) } // If the remote state is newer than the local state, we should use the remote state. if remoteState.Serial > localState.Serial { log.Infof(ctx, "Remote state is newer than local state. Using remote Terraform state.") - err := os.WriteFile(localStatePath, remoteContent, 0600) + err := os.WriteFile(localStatePath, remoteContent, 0o600) return diag.FromErr(err) } diff --git a/bundle/deploy/terraform/state_push_test.go b/bundle/deploy/terraform/state_push_test.go index 4cc52b7a7..54e7f621c 100644 --- a/bundle/deploy/terraform/state_push_test.go +++ b/bundle/deploy/terraform/state_push_test.go @@ -71,7 +71,7 @@ func TestStatePushLargeState(t *testing.T) { b := statePushTestBundle(t) largeState := map[string]any{} - for i := 0; i < 1000000; i++ { + for i := range 1000000 { largeState[fmt.Sprintf("field_%d", i)] = i } diff --git a/bundle/deploy/terraform/tfdyn/convert_cluster_test.go b/bundle/deploy/terraform/tfdyn/convert_cluster_test.go index e6d2620c6..330720a7c 100644 --- a/bundle/deploy/terraform/tfdyn/convert_cluster_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_cluster_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertCluster(t *testing.T) { - var src = resources.Cluster{ + src := resources.Cluster{ ClusterSpec: &compute.ClusterSpec{ NumWorkers: 3, SparkVersion: "13.3.x-scala2.12", @@ -93,5 +93,4 @@ func TestConvertCluster(t *testing.T) { }, }, }, out.Permissions["cluster_my_cluster"]) - } diff --git a/bundle/deploy/terraform/tfdyn/convert_dashboard.go b/bundle/deploy/terraform/tfdyn/convert_dashboard.go index 3ba7e19a2..3ec8b489f 100644 --- a/bundle/deploy/terraform/tfdyn/convert_dashboard.go +++ b/bundle/deploy/terraform/tfdyn/convert_dashboard.go @@ -17,7 +17,7 @@ const ( ) // Marshal "serialized_dashboard" as JSON if it is set in the input but not in the output. -func marshalSerializedDashboard(vin dyn.Value, vout dyn.Value) (dyn.Value, error) { +func marshalSerializedDashboard(vin, vout dyn.Value) (dyn.Value, error) { // Skip if the "serialized_dashboard" field is already set. 
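Two mechanical idioms recur through the hunks above: `0o600`-style octal literals for file modes, and Go 1.22's range-over-int loops (as in `for i := range 1000000` in the state push test). A quick stand-alone check that both are drop-in equivalents:

```go
package main

import "fmt"

func main() {
	// 0o600 and 0600 are the same number; only the literal syntax differs.
	fmt.Println(0o600 == 0600) // true

	// for i := range 5 iterates i = 0, 1, 2, 3, 4 (requires Go 1.22+),
	// replacing the classic for i := 0; i < 5; i++ form.
	for i := range 5 {
		fmt.Print(i, " ")
	}
	fmt.Println()
}
```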
if v := vout.Get(serializedDashboardFieldName); v.IsValid() { return vout, nil diff --git a/bundle/deploy/terraform/tfdyn/convert_dashboard_test.go b/bundle/deploy/terraform/tfdyn/convert_dashboard_test.go index 539ba21aa..6f5d36504 100644 --- a/bundle/deploy/terraform/tfdyn/convert_dashboard_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_dashboard_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertDashboard(t *testing.T) { - var src = resources.Dashboard{ + src := resources.Dashboard{ Dashboard: &dashboards.Dashboard{ DisplayName: "my dashboard", WarehouseId: "f00dcafe", @@ -60,7 +60,7 @@ func TestConvertDashboard(t *testing.T) { } func TestConvertDashboardFilePath(t *testing.T) { - var src = resources.Dashboard{ + src := resources.Dashboard{ FilePath: "some/path", } @@ -84,7 +84,7 @@ func TestConvertDashboardFilePath(t *testing.T) { } func TestConvertDashboardFilePathQuoted(t *testing.T) { - var src = resources.Dashboard{ + src := resources.Dashboard{ FilePath: `C:\foo\bar\baz\dashboard.lvdash.json`, } @@ -108,7 +108,7 @@ func TestConvertDashboardFilePathQuoted(t *testing.T) { } func TestConvertDashboardSerializedDashboardString(t *testing.T) { - var src = resources.Dashboard{ + src := resources.Dashboard{ SerializedDashboard: `{ "json": true }`, } @@ -127,7 +127,7 @@ func TestConvertDashboardSerializedDashboardString(t *testing.T) { } func TestConvertDashboardSerializedDashboardAny(t *testing.T) { - var src = resources.Dashboard{ + src := resources.Dashboard{ SerializedDashboard: map[string]any{ "pages": []map[string]any{ { diff --git a/bundle/deploy/terraform/tfdyn/convert_experiment_test.go b/bundle/deploy/terraform/tfdyn/convert_experiment_test.go index 63add4368..3ef3963f2 100644 --- a/bundle/deploy/terraform/tfdyn/convert_experiment_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_experiment_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertExperiment(t *testing.T) { - var src = resources.MlflowExperiment{ + src := resources.MlflowExperiment{ Experiment: &ml.Experiment{ Name: "name", }, diff --git a/bundle/deploy/terraform/tfdyn/convert_grants_test.go b/bundle/deploy/terraform/tfdyn/convert_grants_test.go index a486bc36f..0a263b493 100644 --- a/bundle/deploy/terraform/tfdyn/convert_grants_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_grants_test.go @@ -13,7 +13,7 @@ import ( ) func TestConvertGrants(t *testing.T) { - var src = resources.RegisteredModel{ + src := resources.RegisteredModel{ Grants: []resources.Grant{ { Privileges: []string{"EXECUTE", "FOO"}, @@ -45,7 +45,7 @@ func TestConvertGrants(t *testing.T) { } func TestConvertGrantsNil(t *testing.T) { - var src = resources.RegisteredModel{ + src := resources.RegisteredModel{ Grants: nil, } @@ -58,7 +58,7 @@ func TestConvertGrantsNil(t *testing.T) { } func TestConvertGrantsEmpty(t *testing.T) { - var src = resources.RegisteredModel{ + src := resources.RegisteredModel{ Grants: []resources.Grant{}, } diff --git a/bundle/deploy/terraform/tfdyn/convert_job.go b/bundle/deploy/terraform/tfdyn/convert_job.go index 8948e3baf..bb2f8cd0f 100644 --- a/bundle/deploy/terraform/tfdyn/convert_job.go +++ b/bundle/deploy/terraform/tfdyn/convert_job.go @@ -83,7 +83,6 @@ func convertJobResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { "libraries": "library", }) }) - if err != nil { return dyn.InvalidValue, err } diff --git a/bundle/deploy/terraform/tfdyn/convert_job_test.go b/bundle/deploy/terraform/tfdyn/convert_job_test.go index 695b9ba24..c73e530d4 100644 --- a/bundle/deploy/terraform/tfdyn/convert_job_test.go 
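Several signatures in this diff collapse repeated parameter types (`Defer(mutator, finally Mutator)`, `isLocalStateStale(local, remote io.Reader)`, `marshalSerializedDashboard(vin, vout dyn.Value)`), and the tests replace `var x = ...` with short variable declarations. Both are purely mechanical gofumpt rewrites; a stand-in example (the `join` helper is not repository code):

```go
package main

import "fmt"

// Before: func join(prefix string, name string) string
// After:  consecutive parameters of the same type share one type token.
func join(prefix, name string) string {
	// Before: var out = prefix + ": " + name
	// After:  prefer a short variable declaration when the type is inferred.
	out := prefix + ": " + name
	return out
}

func main() {
	fmt.Println(join("pipeline", "my_pipeline")) // pipeline: my_pipeline
}
```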
+++ b/bundle/deploy/terraform/tfdyn/convert_job_test.go @@ -15,7 +15,7 @@ import ( ) func TestConvertJob(t *testing.T) { - var src = resources.Job{ + src := resources.Job{ JobSettings: &jobs.JobSettings{ Name: "my job", JobClusters: []jobs.JobCluster{ diff --git a/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint_test.go b/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint_test.go index 63b75e9ab..d46350bb7 100644 --- a/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertModelServingEndpoint(t *testing.T) { - var src = resources.ModelServingEndpoint{ + src := resources.ModelServingEndpoint{ CreateServingEndpoint: &serving.CreateServingEndpoint{ Name: "name", Config: serving.EndpointCoreConfigInput{ diff --git a/bundle/deploy/terraform/tfdyn/convert_model_test.go b/bundle/deploy/terraform/tfdyn/convert_model_test.go index 542caa878..4c4e62c5b 100644 --- a/bundle/deploy/terraform/tfdyn/convert_model_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_model_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertModel(t *testing.T) { - var src = resources.MlflowModel{ + src := resources.MlflowModel{ Model: &ml.Model{ Name: "name", Description: "description", diff --git a/bundle/deploy/terraform/tfdyn/convert_permissions_test.go b/bundle/deploy/terraform/tfdyn/convert_permissions_test.go index ba389020f..ba04384b5 100644 --- a/bundle/deploy/terraform/tfdyn/convert_permissions_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_permissions_test.go @@ -13,7 +13,7 @@ import ( ) func TestConvertPermissions(t *testing.T) { - var src = resources.Job{ + src := resources.Job{ Permissions: []resources.Permission{ { Level: "CAN_VIEW", @@ -59,7 +59,7 @@ func TestConvertPermissions(t *testing.T) { } func TestConvertPermissionsNil(t *testing.T) { - var src = resources.Job{ + src := resources.Job{ Permissions: nil, } @@ -72,7 +72,7 @@ func TestConvertPermissionsNil(t *testing.T) { } func TestConvertPermissionsEmpty(t *testing.T) { - var src = resources.Job{ + src := resources.Job{ Permissions: []resources.Permission{}, } diff --git a/bundle/deploy/terraform/tfdyn/convert_pipeline_test.go b/bundle/deploy/terraform/tfdyn/convert_pipeline_test.go index 7010d463a..0239bad18 100644 --- a/bundle/deploy/terraform/tfdyn/convert_pipeline_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_pipeline_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertPipeline(t *testing.T) { - var src = resources.Pipeline{ + src := resources.Pipeline{ PipelineSpec: &pipelines.PipelineSpec{ Name: "my pipeline", Libraries: []pipelines.PipelineLibrary{ diff --git a/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go b/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go index f71abf43c..16b30de71 100644 --- a/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertQualityMonitor(t *testing.T) { - var src = resources.QualityMonitor{ + src := resources.QualityMonitor{ TableName: "test_table_name", CreateMonitor: &catalog.CreateMonitor{ AssetsDir: "assets_dir", diff --git a/bundle/deploy/terraform/tfdyn/convert_registered_model_test.go b/bundle/deploy/terraform/tfdyn/convert_registered_model_test.go index 77096e8d0..bf2a5ab64 100644 --- a/bundle/deploy/terraform/tfdyn/convert_registered_model_test.go +++ 
b/bundle/deploy/terraform/tfdyn/convert_registered_model_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertRegisteredModel(t *testing.T) { - var src = resources.RegisteredModel{ + src := resources.RegisteredModel{ CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{ Name: "name", CatalogName: "catalog", diff --git a/bundle/deploy/terraform/tfdyn/convert_schema_test.go b/bundle/deploy/terraform/tfdyn/convert_schema_test.go index 2efbf3e43..12822bb3c 100644 --- a/bundle/deploy/terraform/tfdyn/convert_schema_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_schema_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertSchema(t *testing.T) { - var src = resources.Schema{ + src := resources.Schema{ CreateSchema: &catalog.CreateSchema{ Name: "name", CatalogName: "catalog", diff --git a/bundle/deploy/terraform/tfdyn/convert_volume_test.go b/bundle/deploy/terraform/tfdyn/convert_volume_test.go index c897ae69a..09b69489e 100644 --- a/bundle/deploy/terraform/tfdyn/convert_volume_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_volume_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertVolume(t *testing.T) { - var src = resources.Volume{ + src := resources.Volume{ CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{ CatalogName: "catalog", Comment: "comment", diff --git a/bundle/deploy/terraform/tfdyn/rename_keys.go b/bundle/deploy/terraform/tfdyn/rename_keys.go index 650ffb890..95904575f 100644 --- a/bundle/deploy/terraform/tfdyn/rename_keys.go +++ b/bundle/deploy/terraform/tfdyn/rename_keys.go @@ -11,7 +11,7 @@ import ( // definition uses the plural name. This function can convert between the two. func renameKeys(v dyn.Value, rename map[string]string) (dyn.Value, error) { var err error - var acc = dyn.V(map[string]dyn.Value{}) + acc := dyn.V(map[string]dyn.Value{}) nv, err := dyn.Walk(v, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { if len(p) == 0 { @@ -36,7 +36,6 @@ func renameKeys(v dyn.Value, rename map[string]string) (dyn.Value, error) { // Pass through all other values. return v, dyn.ErrSkip }) - if err != nil { return dyn.InvalidValue, err } diff --git a/bundle/deploy/terraform/unbind.go b/bundle/deploy/terraform/unbind.go index 49d65615e..494cb7ef1 100644 --- a/bundle/deploy/terraform/unbind.go +++ b/bundle/deploy/terraform/unbind.go @@ -37,6 +37,6 @@ func (*unbind) Name() string { return "terraform.Unbind" } -func Unbind(resourceType string, resourceKey string) bundle.Mutator { +func Unbind(resourceType, resourceKey string) bundle.Mutator { return &unbind{resourceType: resourceType, resourceKey: resourceKey} } diff --git a/bundle/internal/bundletest/location.go b/bundle/internal/bundletest/location.go index 2ffd621bf..5dcd9d78f 100644 --- a/bundle/internal/bundletest/location.go +++ b/bundle/internal/bundletest/location.go @@ -10,7 +10,7 @@ import ( // with the path it is loaded from. func SetLocation(b *bundle.Bundle, prefix string, locations []dyn.Location) { start := dyn.MustPathFromString(prefix) - b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) { + err := b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) { return dyn.Walk(root, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { // If the path has the given prefix, set the location. 
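Another recurring test fix in this diff (see the `assert.Equal` swaps in state_update_test.go, convert_test.go, and init_test.go earlier) is putting the expected value before the observed one, since testify's `assert.Equal` takes `(t, expected, actual)`. A minimal stand-alone illustration:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestArgumentOrder(t *testing.T) {
	got := []string{"on-update-failure", "on-flow-failure"}

	// Expected value first, observed value second; a mismatch is then reported
	// as "expected: ... actual: ..." rather than the other way around.
	assert.Equal(t, []string{"on-update-failure", "on-flow-failure"}, got)
}
```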
if p.HasPrefix(start) { @@ -27,4 +27,7 @@ func SetLocation(b *bundle.Bundle, prefix string, locations []dyn.Location) { return v, dyn.ErrSkip }) }) + if err != nil { + panic("Mutate() failed: " + err.Error()) + } } diff --git a/bundle/internal/schema/annotations.go b/bundle/internal/schema/annotations.go new file mode 100644 index 000000000..91aaa4555 --- /dev/null +++ b/bundle/internal/schema/annotations.go @@ -0,0 +1,257 @@ +package main + +import ( + "bytes" + "fmt" + "os" + "reflect" + "regexp" + "slices" + "strings" + + yaml3 "gopkg.in/yaml.v3" + + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/dyn/merge" + "github.com/databricks/cli/libs/dyn/yamlloader" + "github.com/databricks/cli/libs/dyn/yamlsaver" + "github.com/databricks/cli/libs/jsonschema" +) + +type annotation struct { + Description string `json:"description,omitempty"` + MarkdownDescription string `json:"markdown_description,omitempty"` + Title string `json:"title,omitempty"` + Default any `json:"default,omitempty"` + Enum []any `json:"enum,omitempty"` +} + +type annotationHandler struct { + // Annotations read from all annotation files including all overrides + parsedAnnotations annotationFile + // Missing annotations for fields that are found in config that need to be added to the annotation file + missingAnnotations annotationFile +} + +/** + * Parsed file with annotations, expected format: + * github.com/databricks/cli/bundle/config.Bundle: + * cluster_id: + * description: "Description" + */ +type annotationFile map[string]map[string]annotation + +const Placeholder = "PLACEHOLDER" + +// Adds annotations to the JSON schema reading from the annotation files. +// More details https://json-schema.org/understanding-json-schema/reference/annotations +func newAnnotationHandler(sources []string) (*annotationHandler, error) { + prev := dyn.NilValue + for _, path := range sources { + b, err := os.ReadFile(path) + if err != nil { + return nil, err + } + generated, err := yamlloader.LoadYAML(path, bytes.NewBuffer(b)) + if err != nil { + return nil, err + } + prev, err = merge.Merge(prev, generated) + if err != nil { + return nil, err + } + } + + var data annotationFile + + err := convert.ToTyped(&data, prev) + if err != nil { + return nil, err + } + + d := &annotationHandler{} + d.parsedAnnotations = data + d.missingAnnotations = annotationFile{} + return d, nil +} + +func (d *annotationHandler) addAnnotations(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema { + refPath := getPath(typ) + shouldHandle := strings.HasPrefix(refPath, "github.com") + if !shouldHandle { + return s + } + + annotations := d.parsedAnnotations[refPath] + if annotations == nil { + annotations = map[string]annotation{} + } + + rootTypeAnnotation, ok := annotations[RootTypeKey] + if ok { + assignAnnotation(&s, rootTypeAnnotation) + } + + for k, v := range s.Properties { + item := annotations[k] + if item.Description == "" { + item.Description = Placeholder + + emptyAnnotations := d.missingAnnotations[refPath] + if emptyAnnotations == nil { + emptyAnnotations = map[string]annotation{} + d.missingAnnotations[refPath] = emptyAnnotations + } + emptyAnnotations[k] = item + } + assignAnnotation(v, item) + } + return s +} + +// Writes missing annotations with placeholder values back to the annotation file +func (d *annotationHandler) syncWithMissingAnnotations(outputPath string) error { + existingFile, err := os.ReadFile(outputPath) + if err != nil { + return err + } + existing, err := 
yamlloader.LoadYAML("", bytes.NewBuffer(existingFile)) + if err != nil { + return err + } + + for k := range d.missingAnnotations { + if !isCliPath(k) { + delete(d.missingAnnotations, k) + fmt.Printf("Missing annotations for `%s` that are not in CLI package, try to fetch latest OpenAPI spec and regenerate annotations", k) + } + } + + missingAnnotations, err := convert.FromTyped(d.missingAnnotations, dyn.NilValue) + if err != nil { + return err + } + + output, err := merge.Merge(existing, missingAnnotations) + if err != nil { + return err + } + + var outputTyped annotationFile + err = convert.ToTyped(&outputTyped, output) + if err != nil { + return err + } + + err = saveYamlWithStyle(outputPath, outputTyped) + if err != nil { + return err + } + return nil +} + +func getPath(typ reflect.Type) string { + return typ.PkgPath() + "." + typ.Name() +} + +func assignAnnotation(s *jsonschema.Schema, a annotation) { + if a.Description != Placeholder { + s.Description = a.Description + } + + if a.Default != nil { + s.Default = a.Default + } + s.MarkdownDescription = convertLinksToAbsoluteUrl(a.MarkdownDescription) + s.Title = a.Title + s.Enum = a.Enum +} + +func saveYamlWithStyle(outputPath string, annotations annotationFile) error { + annotationOrder := yamlsaver.NewOrder([]string{"description", "markdown_description", "title", "default", "enum"}) + style := map[string]yaml3.Style{} + + order := getAlphabeticalOrder(annotations) + dynMap := map[string]dyn.Value{} + for k, v := range annotations { + style[k] = yaml3.LiteralStyle + + properties := map[string]dyn.Value{} + propertiesOrder := getAlphabeticalOrder(v) + for key, value := range v { + d, err := convert.FromTyped(value, dyn.NilValue) + if d.Kind() == dyn.KindNil || err != nil { + properties[key] = dyn.NewValue(map[string]dyn.Value{}, []dyn.Location{{Line: propertiesOrder.Get(key)}}) + continue + } + val, err := yamlsaver.ConvertToMapValue(value, annotationOrder, []string{}, map[string]dyn.Value{}) + if err != nil { + return err + } + properties[key] = val.WithLocations([]dyn.Location{{Line: propertiesOrder.Get(key)}}) + } + + dynMap[k] = dyn.NewValue(properties, []dyn.Location{{Line: order.Get(k)}}) + } + + saver := yamlsaver.NewSaverWithStyle(style) + err := saver.SaveAsYAML(dynMap, outputPath, true) + if err != nil { + return err + } + return nil +} + +func getAlphabeticalOrder[T any](mapping map[string]T) *yamlsaver.Order { + order := []string{} + for k := range mapping { + order = append(order, k) + } + slices.Sort(order) + return yamlsaver.NewOrder(order) +} + +func convertLinksToAbsoluteUrl(s string) string { + if s == "" { + return s + } + base := "https://docs.databricks.com" + referencePage := "/dev-tools/bundles/reference.html" + + // Regular expression to match Markdown-style links like [_](link) + re := regexp.MustCompile(`\[_\]\(([^)]+)\)`) + result := re.ReplaceAllStringFunc(s, func(match string) string { + matches := re.FindStringSubmatch(match) + if len(matches) < 2 { + return match + } + link := matches[1] + var text, absoluteURL string + + if strings.HasPrefix(link, "#") { + text = strings.TrimPrefix(link, "#") + absoluteURL = fmt.Sprintf("%s%s%s", base, referencePage, link) + + // Handle relative paths like /dev-tools/bundles/resources.html#dashboard + } else if strings.HasPrefix(link, "/") { + absoluteURL = strings.ReplaceAll(fmt.Sprintf("%s%s", base, link), ".md", ".html") + if strings.Contains(link, "#") { + parts := strings.Split(link, "#") + text = parts[1] + } else { + text = "link" + } + } else { + return match + } + 
+ return fmt.Sprintf("[%s](%s)", text, absoluteURL) + }) + + return result +} + +func isCliPath(path string) bool { + return !strings.HasPrefix(path, "github.com/databricks/databricks-sdk-go") +} diff --git a/bundle/internal/schema/annotations.yml b/bundle/internal/schema/annotations.yml new file mode 100644 index 000000000..5283a431b --- /dev/null +++ b/bundle/internal/schema/annotations.yml @@ -0,0 +1,461 @@ +github.com/databricks/cli/bundle/config.Artifact: + "build": + "description": |- + An optional set of non-default build commands that you want to run locally before deployment. + + For Python wheel builds, the Databricks CLI assumes that it can find a local install of the Python wheel package to run builds, and it runs the command python setup.py bdist_wheel by default during each bundle deployment. + + To specify multiple build commands, separate each command with double-ampersand (&&) characters. + "executable": + "description": |- + The executable type. + "files": + "description": |- + The source files for the artifact. + "markdown_description": |- + The source files for the artifact, defined as an [_](#artifact_file). + "path": + "description": |- + The location where the built artifact will be saved. + "type": + "description": |- + The type of the artifact. + "markdown_description": |- + The type of the artifact. Valid values are `wheel` or `jar` +github.com/databricks/cli/bundle/config.ArtifactFile: + "source": + "description": |- + The path of the files used to build the artifact. +github.com/databricks/cli/bundle/config.Bundle: + "cluster_id": + "description": |- + The ID of a cluster to use to run the bundle. + "markdown_description": |- + The ID of a cluster to use to run the bundle. See [_](/dev-tools/bundles/settings.md#cluster_id). + "compute_id": + "description": |- + PLACEHOLDER + "databricks_cli_version": + "description": |- + The Databricks CLI version to use for the bundle. + "markdown_description": |- + The Databricks CLI version to use for the bundle. See [_](/dev-tools/bundles/settings.md#databricks_cli_version). + "deployment": + "description": |- + The definition of the bundle deployment + "markdown_description": |- + The definition of the bundle deployment. For supported attributes, see [_](#deployment) and [_](/dev-tools/bundles/deployment-modes.md). + "git": + "description": |- + The Git version control details that are associated with your bundle. + "markdown_description": |- + The Git version control details that are associated with your bundle. For supported attributes, see [_](#git) and [_](/dev-tools/bundles/settings.md#git). + "name": + "description": |- + The name of the bundle. + "uuid": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config.Deployment: + "fail_on_active_runs": + "description": |- + Whether to fail on active runs. If this is set to true a deployment that is running can be interrupted. + "lock": + "description": |- + The deployment lock attributes. + "markdown_description": |- + The deployment lock attributes. See [_](#lock). +github.com/databricks/cli/bundle/config.Experimental: + "pydabs": + "description": |- + The PyDABs configuration. + "python": + "description": |- + Configures loading of Python code defined with 'databricks-bundles' package. 
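The new annotations.go above rewrites the `[_](...)` placeholders used throughout annotations.yml into absolute documentation links. A trimmed, stand-alone re-implementation of that logic, kept only to show what the placeholders expand to; the real function is `convertLinksToAbsoluteUrl` in the file above:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func convertLinks(s string) string {
	base := "https://docs.databricks.com"
	referencePage := "/dev-tools/bundles/reference.html"
	re := regexp.MustCompile(`\[_\]\(([^)]+)\)`)

	return re.ReplaceAllStringFunc(s, func(match string) string {
		link := re.FindStringSubmatch(match)[1]
		switch {
		case strings.HasPrefix(link, "#"):
			// Anchor-only links point into the reference page.
			return fmt.Sprintf("[%s](%s%s%s)", strings.TrimPrefix(link, "#"), base, referencePage, link)
		case strings.HasPrefix(link, "/"):
			// Site-relative .md links become absolute .html links.
			url := strings.ReplaceAll(base+link, ".md", ".html")
			text := "link"
			if i := strings.Index(link, "#"); i >= 0 {
				text = link[i+1:]
			}
			return fmt.Sprintf("[%s](%s)", text, url)
		default:
			return match
		}
	})
}

func main() {
	fmt.Println(convertLinks("See [_](/dev-tools/bundles/resources.md#cluster)"))
	// See [cluster](https://docs.databricks.com/dev-tools/bundles/resources.html#cluster)
	fmt.Println(convertLinks("See [_](#git)"))
	// See [git](https://docs.databricks.com/dev-tools/bundles/reference.html#git)
}
```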
+  "python_wheel_wrapper":
+    "description": |-
+      Whether to use a Python wheel wrapper
+  "scripts":
+    "description": |-
+      The commands to run
+  "use_legacy_run_as":
+    "description": |-
+      Whether to use the legacy run_as behavior
+github.com/databricks/cli/bundle/config.Git:
+  "branch":
+    "description": |-
+      The Git branch name.
+    "markdown_description": |-
+      The Git branch name. See [_](/dev-tools/bundles/settings.md#git).
+  "origin_url":
+    "description": |-
+      The origin URL of the repository.
+    "markdown_description": |-
+      The origin URL of the repository. See [_](/dev-tools/bundles/settings.md#git).
+github.com/databricks/cli/bundle/config.Lock:
+  "enabled":
+    "description": |-
+      Whether this lock is enabled.
+  "force":
+    "description": |-
+      Whether to force this lock if it is enabled.
+github.com/databricks/cli/bundle/config.Presets:
+  "jobs_max_concurrent_runs":
+    "description": |-
+      The maximum concurrent runs for a job.
+  "name_prefix":
+    "description": |-
+      The prefix for job runs of the bundle.
+  "pipelines_development":
+    "description": |-
+      Whether pipeline deployments should be locked in development mode.
+  "source_linked_deployment":
+    "description": |-
+      Whether to link the deployment to the bundle source.
+  "tags":
+    "description": |-
+      The tags for the bundle deployment.
+  "trigger_pause_status":
+    "description": |-
+      A pause status to apply to all job triggers and schedules. Valid values are PAUSED or UNPAUSED.
+github.com/databricks/cli/bundle/config.PyDABs:
+  "enabled":
+    "description": |-
+      Whether or not PyDABs (Private Preview) is enabled
+  "import":
+    "description": |-
+      The PyDABs project to import to discover resources, resource generators, and mutators
+  "venv_path":
+    "description": |-
+      The Python virtual environment path
+github.com/databricks/cli/bundle/config.Python:
+  "mutators":
+    "description": |-
+      Mutators contains a list of fully qualified function paths to mutator functions.
+
+      Example: ["my_project.mutators:add_default_cluster"]
+  "resources":
+    "description": |-
+      Resources contains a list of fully qualified function paths to load resources
+      defined in Python code.
+
+      Example: ["my_project.resources:load_resources"]
+  "venv_path":
+    "description": |-
+      VEnvPath is the path to the virtual environment.
+
+      If enabled, Python code will execute within this environment. If disabled,
+      it defaults to using the Python interpreter available in the current shell.
+github.com/databricks/cli/bundle/config.Resources:
+  "clusters":
+    "description": |-
+      The cluster definitions for the bundle.
+    "markdown_description": |-
+      The cluster definitions for the bundle. See [_](/dev-tools/bundles/resources.md#cluster)
+  "dashboards":
+    "description": |-
+      The dashboard definitions for the bundle.
+    "markdown_description": |-
+      The dashboard definitions for the bundle. See [_](/dev-tools/bundles/resources.md#dashboard)
+  "experiments":
+    "description": |-
+      The experiment definitions for the bundle.
+    "markdown_description": |-
+      The experiment definitions for the bundle. See [_](/dev-tools/bundles/resources.md#experiment)
+  "jobs":
+    "description": |-
+      The job definitions for the bundle.
+    "markdown_description": |-
+      The job definitions for the bundle. See [_](/dev-tools/bundles/resources.md#job)
+  "model_serving_endpoints":
+    "description": |-
+      The model serving endpoint definitions for the bundle.
+    "markdown_description": |-
+      The model serving endpoint definitions for the bundle.
See [_](/dev-tools/bundles/resources.md#model_serving_endpoint) + "models": + "description": |- + The model definitions for the bundle. + "markdown_description": |- + The model definitions for the bundle. See [_](/dev-tools/bundles/resources.md#model) + "pipelines": + "description": |- + The pipeline definitions for the bundle. + "markdown_description": |- + The pipeline definitions for the bundle. See [_](/dev-tools/bundles/resources.md#pipeline) + "quality_monitors": + "description": |- + The quality monitor definitions for the bundle. + "markdown_description": |- + The quality monitor definitions for the bundle. See [_](/dev-tools/bundles/resources.md#quality_monitor) + "registered_models": + "description": |- + The registered model definitions for the bundle. + "markdown_description": |- + The registered model definitions for the bundle. See [_](/dev-tools/bundles/resources.md#registered_model) + "schemas": + "description": |- + The schema definitions for the bundle. + "markdown_description": |- + The schema definitions for the bundle. See [_](/dev-tools/bundles/resources.md#schema) + "volumes": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config.Root: + "artifacts": + "description": |- + Defines the attributes to build an artifact + "bundle": + "description": |- + The attributes of the bundle. + "markdown_description": |- + The attributes of the bundle. See [_](/dev-tools/bundles/settings.md#bundle) + "experimental": + "description": |- + Defines attributes for experimental features. + "include": + "description": |- + Specifies a list of path globs that contain configuration files to include within the bundle. + "markdown_description": |- + Specifies a list of path globs that contain configuration files to include within the bundle. See [_](/dev-tools/bundles/settings.md#include) + "permissions": + "description": |- + Defines the permissions to apply to experiments, jobs, pipelines, and models defined in the bundle + "markdown_description": |- + Defines the permissions to apply to experiments, jobs, pipelines, and models defined in the bundle. See [_](/dev-tools/bundles/settings.md#permissions) and [_](/dev-tools/bundles/permissions.md). + "presets": + "description": |- + Defines bundle deployment presets. + "markdown_description": |- + Defines bundle deployment presets. See [_](/dev-tools/bundles/deployment-modes.md#presets). + "resources": + "description": |- + Specifies information about the Databricks resources used by the bundle + "markdown_description": |- + Specifies information about the Databricks resources used by the bundle. See [_](/dev-tools/bundles/resources.md). + "run_as": + "description": |- + The identity to use to run the bundle. + "sync": + "description": |- + The files and file paths to include or exclude in the bundle. + "markdown_description": |- + The files and file paths to include or exclude in the bundle. See [_](/dev-tools/bundles/) + "targets": + "description": |- + Defines deployment targets for the bundle. + "variables": + "description": |- + A Map that defines the custom variables for the bundle, where each key is the name of the variable, and the value is a Map that defines the variable. + "workspace": + "description": |- + Defines the Databricks workspace for the bundle. +github.com/databricks/cli/bundle/config.Sync: + "exclude": + "description": |- + A list of files or folders to exclude from the bundle. + "include": + "description": |- + A list of files or folders to include in the bundle. 
+ "paths": + "description": |- + The local folder paths, which can be outside the bundle root, to synchronize to the workspace when the bundle is deployed. +github.com/databricks/cli/bundle/config.Target: + "artifacts": + "description": |- + The artifacts to include in the target deployment. + "markdown_description": |- + The artifacts to include in the target deployment. See [_](#artifact) + "bundle": + "description": |- + The name of the bundle when deploying to this target. + "cluster_id": + "description": |- + The ID of the cluster to use for this target. + "compute_id": + "description": |- + Deprecated. The ID of the compute to use for this target. + "default": + "description": |- + Whether this target is the default target. + "git": + "description": |- + The Git version control settings for the target. + "markdown_description": |- + The Git version control settings for the target. See [_](#git). + "mode": + "description": |- + The deployment mode for the target. + "markdown_description": |- + The deployment mode for the target. Valid values are `development` or `production`. See [_](/dev-tools/bundles/deployment-modes.md). + "permissions": + "description": |- + The permissions for deploying and running the bundle in the target. + "markdown_description": |- + The permissions for deploying and running the bundle in the target. See [_](#permission). + "presets": + "description": |- + The deployment presets for the target. + "markdown_description": |- + The deployment presets for the target. See [_](#preset). + "resources": + "description": |- + The resource definitions for the target. + "markdown_description": |- + The resource definitions for the target. See [_](#resources). + "run_as": + "description": |- + The identity to use to run the bundle. + "markdown_description": |- + The identity to use to run the bundle. See [_](#job_run_as) and [_](/dev-tools/bundles/run_as.md). + "sync": + "description": |- + The local paths to sync to the target workspace when a bundle is run or deployed. + "markdown_description": |- + The local paths to sync to the target workspace when a bundle is run or deployed. See [_](#sync). + "variables": + "description": |- + The custom variable definitions for the target. + "markdown_description": |- + The custom variable definitions for the target. See [_](/dev-tools/bundles/settings.md#variables) and [_](/dev-tools/bundles/variables.md). + "workspace": + "description": |- + The Databricks workspace for the target. + "markdown_description": |- + The Databricks workspace for the target. [_](#workspace) +github.com/databricks/cli/bundle/config.Workspace: + "artifact_path": + "description": |- + The artifact path to use within the workspace for both deployments and workflow runs + "auth_type": + "description": |- + The authentication type. 
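+# A minimal sketch (target name and host are illustrative) of how the target and
+# workspace attributes documented above are typically combined in a bundle configuration:
+#
+#   targets:
+#     dev:
+#       mode: development
+#       default: true
+#       workspace:
+#         host: https://my-workspace.cloud.databricks.com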
+ "azure_client_id": + "description": |- + The Azure client ID + "azure_environment": + "description": |- + The Azure environment + "azure_login_app_id": + "description": |- + The Azure login app ID + "azure_tenant_id": + "description": |- + The Azure tenant ID + "azure_use_msi": + "description": |- + Whether to use MSI for Azure + "azure_workspace_resource_id": + "description": |- + The Azure workspace resource ID + "client_id": + "description": |- + The client ID for the workspace + "file_path": + "description": |- + The file path to use within the workspace for both deployments and workflow runs + "google_service_account": + "description": |- + The Google service account name + "host": + "description": |- + The Databricks workspace host URL + "profile": + "description": |- + The Databricks workspace profile name + "resource_path": + "description": |- + The workspace resource path + "root_path": + "description": |- + The Databricks workspace root path + "state_path": + "description": |- + The workspace state path +github.com/databricks/cli/bundle/config/resources.Grant: + "principal": + "description": |- + The name of the principal that will be granted privileges + "privileges": + "description": |- + The privileges to grant to the specified entity +github.com/databricks/cli/bundle/config/resources.Permission: + "group_name": + "description": |- + The name of the group that has the permission set in level. + "level": + "description": |- + The allowed permission for user, group, service principal defined for this permission. + "service_principal_name": + "description": |- + The name of the service principal that has the permission set in level. + "user_name": + "description": |- + The name of the user that has the permission set in level. +github.com/databricks/cli/bundle/config/variable.Lookup: + "alert": + "description": |- + PLACEHOLDER + "cluster": + "description": |- + PLACEHOLDER + "cluster_policy": + "description": |- + PLACEHOLDER + "dashboard": + "description": |- + PLACEHOLDER + "instance_pool": + "description": |- + PLACEHOLDER + "job": + "description": |- + PLACEHOLDER + "metastore": + "description": |- + PLACEHOLDER + "notification_destination": + "description": |- + PLACEHOLDER + "pipeline": + "description": |- + PLACEHOLDER + "query": + "description": |- + PLACEHOLDER + "service_principal": + "description": |- + PLACEHOLDER + "warehouse": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/variable.TargetVariable: + "default": + "description": |- + PLACEHOLDER + "description": + "description": |- + The description of the variable. + "lookup": + "description": |- + The name of the alert, cluster_policy, cluster, dashboard, instance_pool, job, metastore, pipeline, query, service_principal, or warehouse object for which to retrieve an ID. + "markdown_description": + "description": |- + The type of the variable. + "type": + "description": |- + The type of the variable. +github.com/databricks/cli/bundle/config/variable.Variable: + "default": + "description": |- + PLACEHOLDER + "description": + "description": |- + The description of the variable + "lookup": + "description": |- + The name of the alert, cluster_policy, cluster, dashboard, instance_pool, job, metastore, pipeline, query, service_principal, or warehouse object for which to retrieve an ID. 
+    "markdown_description": |-
+      The name of the `alert`, `cluster_policy`, `cluster`, `dashboard`, `instance_pool`, `job`, `metastore`, `pipeline`, `query`, `service_principal`, or `warehouse` object for which to retrieve an ID.
+  "type":
+    "description": |-
+      The type of the variable.
diff --git a/bundle/internal/schema/annotations_openapi.yml b/bundle/internal/schema/annotations_openapi.yml
new file mode 100644
index 000000000..e9c893c87
--- /dev/null
+++ b/bundle/internal/schema/annotations_openapi.yml
@@ -0,0 +1,2924 @@
+# This file is auto-generated. DO NOT EDIT.
+github.com/databricks/cli/bundle/config/resources.Cluster:
+  "apply_policy_default_values":
+    "description": |-
+      When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied.
+  "autoscale":
+    "description": |-
+      Parameters needed in order to automatically scale clusters up and down based on load.
+      Note: autoscaling works best with DB runtime versions 3.0 or later.
+  "autotermination_minutes":
+    "description": |-
+      Automatically terminates the cluster after it is inactive for this time in minutes. If not set,
+      this cluster will not be automatically terminated. If specified, the threshold must be between
+      10 and 10000 minutes.
+      Users can also set this value to 0 to explicitly disable automatic termination.
+  "aws_attributes":
+    "description": |-
+      Attributes related to clusters running on Amazon Web Services.
+      If not specified at cluster creation, a set of default values will be used.
+  "azure_attributes":
+    "description": |-
+      Attributes related to clusters running on Microsoft Azure.
+      If not specified at cluster creation, a set of default values will be used.
+  "cluster_log_conf":
+    "description": |-
+      The configuration for delivering spark logs to a long-term storage destination.
+      Two kinds of destinations (dbfs and s3) are supported. Only one destination can be specified
+      for one cluster. If the conf is given, the logs will be delivered to the destination every
+      `5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while
+      the destination of executor logs is `$destination/$clusterId/executor`.
+  "cluster_name":
+    "description": |
+      Cluster name requested by the user. This doesn't have to be unique.
+      If not specified at creation, the cluster name will be an empty string.
+  "custom_tags":
+    "description": |-
+      Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
+      instances and EBS volumes) with these tags in addition to `default_tags`. Notes:
+
+      - Currently, Databricks allows at most 45 custom tags
+
+      - Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags
+  "data_security_mode": {}
+  "docker_image": {}
+  "driver_instance_pool_id":
+    "description": |-
+      The optional ID of the instance pool for the driver of the cluster belongs.
+      The pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not
+      assigned.
+  "driver_node_type_id":
+    "description": |
+      The node type of the Spark driver. Note that this field is optional;
+      if unset, the driver node type will be set as the same value
+      as `node_type_id` defined above.
+  "enable_elastic_disk":
+    "description": |-
+      Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
+      space when its Spark workers are running low on disk space.
This feature requires specific AWS + permissions to function correctly - refer to the User Guide for more details. + "enable_local_disk_encryption": + "description": |- + Whether to enable LUKS on cluster VMs' local disks + "gcp_attributes": + "description": |- + Attributes related to clusters running on Google Cloud Platform. + If not specified at cluster creation, a set of default values will be used. + "init_scripts": + "description": |- + The configuration for storing init scripts. Any number of destinations can be specified. The scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified, init script logs are sent to `//init_scripts`. + "instance_pool_id": + "description": |- + The optional ID of the instance pool to which the cluster belongs. + "is_single_node": + "description": | + This field can only be used with `kind`. + + When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers` + "kind": {} + "node_type_id": + "description": | + This field encodes, through a single value, the resources available to each of + the Spark nodes in this cluster. For example, the Spark nodes can be provisioned + and optimized for memory or compute intensive workloads. A list of available node + types can be retrieved by using the :method:clusters/listNodeTypes API call. + "num_workers": + "description": |- + Number of worker nodes that this cluster should have. A cluster has one Spark Driver + and `num_workers` Executors for a total of `num_workers` + 1 Spark nodes. + + Note: When reading the properties of a cluster, this field reflects the desired number + of workers rather than the actual current number of workers. For instance, if a cluster + is resized from 5 to 10 workers, this field will immediately be updated to reflect + the target size of 10 workers, whereas the workers listed in `spark_info` will gradually + increase from 5 to 10 as the new nodes are provisioned. + "policy_id": + "description": |- + The ID of the cluster policy used to create the cluster if applicable. + "runtime_engine": {} + "single_user_name": + "description": |- + Single user name if data_security_mode is `SINGLE_USER` + "spark_conf": + "description": | + An object containing a set of optional, user-specified Spark configuration key-value pairs. + Users can also pass in a string of extra JVM options to the driver and the executors via + `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively. + "spark_env_vars": + "description": |- + An object containing a set of optional, user-specified environment variable key-value pairs. + Please note that key-value pair of the form (X,Y) will be exported as is (i.e., + `export X='Y'`) while launching the driver and workers. + + In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending + them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all + default databricks managed environmental variables are included as well. + + Example Spark environment variables: + `{"SPARK_WORKER_MEMORY": "28000m", "SPARK_LOCAL_DIRS": "/local_disk0"}` or + `{"SPARK_DAEMON_JAVA_OPTS": "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}` + "spark_version": + "description": | + The Spark version of the cluster, e.g. `3.3.x-scala2.11`. + A list of available Spark versions can be retrieved by using + the :method:clusters/sparkVersions API call. 
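+# A minimal sketch (resource name, node type, and Spark version are illustrative)
+# of a bundle cluster definition that uses fields documented above:
+#
+#   resources:
+#     clusters:
+#       my_cluster:
+#         cluster_name: my-cluster
+#         spark_version: 15.4.x-scala2.12
+#         node_type_id: i3.xlarge
+#         autoscale:
+#           min_workers: 1
+#           max_workers: 4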
+ "ssh_public_keys": + "description": |- + SSH public key contents that will be added to each Spark node in this cluster. The + corresponding private keys can be used to login with the user name `ubuntu` on port `2200`. + Up to 10 keys can be specified. + "use_ml_runtime": + "description": | + This field can only be used with `kind`. + + `effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not. + "workload_type": {} +github.com/databricks/cli/bundle/config/resources.Dashboard: + "create_time": + "description": |- + The timestamp of when the dashboard was created. + "dashboard_id": + "description": |- + UUID identifying the dashboard. + "display_name": + "description": |- + The display name of the dashboard. + "etag": + "description": |- + The etag for the dashboard. Can be optionally provided on updates to ensure that the dashboard + has not been modified since the last read. + This field is excluded in List Dashboards responses. + "lifecycle_state": + "description": |- + The state of the dashboard resource. Used for tracking trashed status. + "parent_path": + "description": |- + The workspace path of the folder containing the dashboard. Includes leading slash and no + trailing slash. + This field is excluded in List Dashboards responses. + "path": + "description": |- + The workspace path of the dashboard asset, including the file name. + Exported dashboards always have the file extension `.lvdash.json`. + This field is excluded in List Dashboards responses. + "serialized_dashboard": + "description": |- + The contents of the dashboard in serialized string form. + This field is excluded in List Dashboards responses. + Use the [get dashboard API](https://docs.databricks.com/api/workspace/lakeview/get) + to retrieve an example response, which includes the `serialized_dashboard` field. + This field provides the structure of the JSON string that represents the dashboard's + layout and components. + "update_time": + "description": |- + The timestamp of when the dashboard was last updated by the user. + This field is excluded in List Dashboards responses. + "warehouse_id": + "description": |- + The warehouse ID used to run the dashboard. +github.com/databricks/cli/bundle/config/resources.Job: + "budget_policy_id": + "description": |- + The id of the user specified budget policy to use for this job. + If not specified, a default budget policy may be applied when creating or modifying the job. + See `effective_budget_policy_id` for the budget policy used by this workload. + "continuous": + "description": |- + An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used. + "deployment": + "description": |- + Deployment information for jobs managed by external sources. + "description": + "description": |- + An optional description for the job. The maximum length is 27700 characters in UTF-8 encoding. + "edit_mode": + "description": |- + Edit mode of the job. + + * `UI_LOCKED`: The job is in a locked UI state and cannot be modified. + * `EDITABLE`: The job is in an editable state and can be modified. + "email_notifications": + "description": |- + An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted. 
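+# A minimal sketch (names, paths, and addresses are illustrative) of a bundle job
+# definition that uses fields from this section:
+#
+#   resources:
+#     jobs:
+#       my_job:
+#         name: my-job
+#         email_notifications:
+#           on_failure:
+#             - ops@example.com
+#         tasks:
+#           - task_key: main
+#             notebook_task:
+#               notebook_path: ./src/main.ipynb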
+ "environments": + "description": |- + A list of task execution environment specifications that can be referenced by serverless tasks of this job. + An environment is required to be present for serverless tasks. + For serverless notebook tasks, the environment is accessible in the notebook environment panel. + For other serverless tasks, the task environment is required to be specified using environment_key in the task settings. + "format": + "description": |- + Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1 this value is always set to `"MULTI_TASK"`. + "git_source": + "description": |- + An optional specification for a remote Git repository containing the source code used by tasks. Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks. + + If `git_source` is set, these tasks retrieve the file from the remote repository by default. However, this behavior can be overridden by setting `source` to `WORKSPACE` on the task. + + Note: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are used, `git_source` must be defined on the job. + "health": {} + "job_clusters": + "description": |- + A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. + "max_concurrent_runs": + "description": |- + An optional maximum allowed number of concurrent runs of the job. + Set this value if you want to be able to execute multiple runs of the same job concurrently. + This is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters. + This setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs. + However, from then on, new runs are skipped unless there are fewer than 3 active runs. + This value cannot exceed 1000. Setting this value to `0` causes all new runs to be skipped. + "name": + "description": |- + An optional name for the job. The maximum length is 4096 bytes in UTF-8 encoding. + "notification_settings": + "description": |- + Optional notification settings that are used when sending notifications to each of the `email_notifications` and `webhook_notifications` for this job. + "parameters": + "description": |- + Job-level parameter definitions + "queue": + "description": |- + The queue settings of the job. + "run_as": {} + "schedule": + "description": |- + An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`. + "tags": + "description": |- + A map of tags associated with the job. These are forwarded to the cluster as cluster tags for jobs clusters, and are subject to the same limitations as cluster tags. A maximum of 25 tags can be added to the job. + "tasks": + "description": |- + A list of task specifications to be executed by this job. + "timeout_seconds": + "description": |- + An optional timeout applied to each run of this job. A value of `0` means no timeout. + "trigger": + "description": |- + A configuration to trigger a run when certain conditions are met. 
The default behavior is that the job runs only when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`. + "webhook_notifications": + "description": |- + A collection of system notification IDs to notify when runs of this job begin or complete. +github.com/databricks/cli/bundle/config/resources.MlflowExperiment: + "artifact_location": + "description": |- + Location where artifacts for the experiment are stored. + "creation_time": + "description": |- + Creation time + "experiment_id": + "description": |- + Unique identifier for the experiment. + "last_update_time": + "description": |- + Last update time + "lifecycle_stage": + "description": |- + Current life cycle stage of the experiment: "active" or "deleted". + Deleted experiments are not returned by APIs. + "name": + "description": |- + Human readable name that identifies the experiment. + "tags": + "description": |- + Tags: Additional metadata key-value pairs. +github.com/databricks/cli/bundle/config/resources.MlflowModel: + "creation_timestamp": + "description": |- + Timestamp recorded when this `registered_model` was created. + "description": + "description": |- + Description of this `registered_model`. + "last_updated_timestamp": + "description": |- + Timestamp recorded when metadata for this `registered_model` was last updated. + "latest_versions": + "description": |- + Collection of latest model versions for each stage. + Only contains models with current `READY` status. + "name": + "description": |- + Unique name for the model. + "tags": + "description": |- + Tags: Additional metadata key-value pairs for this `registered_model`. + "user_id": + "description": |- + User that created this `registered_model` +github.com/databricks/cli/bundle/config/resources.ModelServingEndpoint: + "ai_gateway": + "description": |- + The AI Gateway configuration for the serving endpoint. NOTE: only external model endpoints are supported as of now. + "config": + "description": |- + The core config of the serving endpoint. + "name": + "description": | + The name of the serving endpoint. This field is required and must be unique across a Databricks workspace. + An endpoint name can consist of alphanumeric characters, dashes, and underscores. + "rate_limits": + "description": |- + Rate limits to be applied to the serving endpoint. NOTE: this field is deprecated, please use AI Gateway to manage rate limits. + "route_optimized": + "description": |- + Enable route optimization for the serving endpoint. + "tags": + "description": |- + Tags to be attached to the serving endpoint and automatically propagated to billing logs. +github.com/databricks/cli/bundle/config/resources.Pipeline: + "budget_policy_id": + "description": |- + Budget policy of this pipeline. + "catalog": + "description": |- + A catalog in Unity Catalog to publish data from this pipeline to. If `target` is specified, tables in this pipeline are published to a `target` schema inside `catalog` (for example, `catalog`.`target`.`table`). If `target` is not specified, no data is published to Unity Catalog. + "channel": + "description": |- + DLT Release Channel that specifies which version to use. + "clusters": + "description": |- + Cluster settings for this pipeline deployment. + "configuration": + "description": |- + String-String configuration for this pipeline execution. + "continuous": + "description": |- + Whether the pipeline is continuous or triggered. This replaces `trigger`. + "deployment": + "description": |- + Deployment type of this pipeline. 
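+# A minimal sketch (endpoint name and served model reference are illustrative) of a
+# bundle model serving endpoint definition using fields from this section:
+#
+#   resources:
+#     model_serving_endpoints:
+#       my_endpoint:
+#         name: my-endpoint
+#         config:
+#           served_entities:
+#             - entity_name: main.default.my_model
+#               entity_version: "1"
+#               workload_size: Small
+#               scale_to_zero_enabled: true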
+ "development": + "description": |- + Whether the pipeline is in Development mode. Defaults to false. + "edition": + "description": |- + Pipeline product edition. + "filters": + "description": |- + Filters on which Pipeline packages to include in the deployed graph. + "gateway_definition": + "description": |- + The definition of a gateway pipeline to support change data capture. + "id": + "description": |- + Unique identifier for this pipeline. + "ingestion_definition": + "description": |- + The configuration for a managed ingestion pipeline. These settings cannot be used with the 'libraries', 'target' or 'catalog' settings. + "libraries": + "description": |- + Libraries or code needed by this deployment. + "name": + "description": |- + Friendly identifier for this pipeline. + "notifications": + "description": |- + List of notification settings for this pipeline. + "photon": + "description": |- + Whether Photon is enabled for this pipeline. + "restart_window": + "description": |- + Restart window of this pipeline. + "schema": + "description": |- + The default schema (database) where tables are read from or published to. The presence of this field implies that the pipeline is in direct publishing mode. + "serverless": + "description": |- + Whether serverless compute is enabled for this pipeline. + "storage": + "description": |- + DBFS root directory for storing checkpoints and tables. + "target": + "description": |- + Target schema (database) to add tables in this pipeline to. If not specified, no data is published to the Hive metastore or Unity Catalog. To publish to Unity Catalog, also specify `catalog`. + "trigger": + "description": |- + Which pipeline trigger to use. Deprecated: Use `continuous` instead. +github.com/databricks/cli/bundle/config/resources.QualityMonitor: + "assets_dir": + "description": |- + The directory to store monitoring assets (e.g. dashboard, metric tables). + "baseline_table_name": + "description": | + Name of the baseline table from which drift metrics are computed from. + Columns in the monitored table should also be present in the baseline table. + "custom_metrics": + "description": | + Custom metrics to compute on the monitored table. These can be aggregate metrics, derived + metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time + windows). + "data_classification_config": + "description": |- + The data classification config for the monitor. + "inference_log": + "description": |- + Configuration for monitoring inference logs. + "notifications": + "description": |- + The notification settings for the monitor. + "output_schema_name": + "description": |- + Schema where output metric tables are created. + "schedule": + "description": |- + The schedule for automatically updating and refreshing metric tables. + "skip_builtin_dashboard": + "description": |- + Whether to skip creating a default dashboard summarizing data quality metrics. + "slicing_exprs": + "description": | + List of column expressions to slice data with for targeted analysis. The data is grouped by + each expression independently, resulting in a separate slice for each predicate and its + complements. For high-cardinality columns, only the top 100 unique values by frequency will + generate slices. + "snapshot": + "description": |- + Configuration for monitoring snapshot tables. + "time_series": + "description": |- + Configuration for monitoring time series tables. 
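+# A minimal sketch (names, catalog, and notebook path are illustrative) of a bundle
+# pipeline definition using the pipeline fields documented above:
+#
+#   resources:
+#     pipelines:
+#       my_pipeline:
+#         name: my-pipeline
+#         catalog: main
+#         target: dev
+#         serverless: true
+#         libraries:
+#           - notebook:
+#               path: ./pipelines/transform.py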
+ "warehouse_id": + "description": | + Optional argument to specify the warehouse for dashboard creation. If not specified, the first running + warehouse will be used. +github.com/databricks/cli/bundle/config/resources.RegisteredModel: + "catalog_name": + "description": |- + The name of the catalog where the schema and the registered model reside + "comment": + "description": |- + The comment attached to the registered model + "name": + "description": |- + The name of the registered model + "schema_name": + "description": |- + The name of the schema where the registered model resides + "storage_location": + "description": |- + The storage location on the cloud under which model version data files are stored +github.com/databricks/cli/bundle/config/resources.Schema: + "catalog_name": + "description": |- + Name of parent catalog. + "comment": + "description": |- + User-provided free-form text description. + "name": + "description": |- + Name of schema, relative to parent catalog. + "properties": {} + "storage_root": + "description": |- + Storage root URL for managed tables within schema. +github.com/databricks/cli/bundle/config/resources.Volume: + "catalog_name": + "description": |- + The name of the catalog where the schema and the volume are + "comment": + "description": |- + The comment attached to the volume + "name": + "description": |- + The name of the volume + "schema_name": + "description": |- + The name of the schema where the volume is + "storage_location": + "description": |- + The storage location on the cloud + "volume_type": {} +github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedule: + "pause_status": + "description": |- + Read only field that indicates whether a schedule is paused or not. + "quartz_cron_expression": + "description": | + The expression that determines when to run the monitor. See [examples](https://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html). + "timezone_id": + "description": | + The timezone id (e.g., ``"PST"``) in which to evaluate the quartz expression. +github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedulePauseStatus: + "_": + "description": |- + Read only field that indicates whether a schedule is paused or not. + "enum": + - |- + UNPAUSED + - |- + PAUSED +github.com/databricks/databricks-sdk-go/service/catalog.MonitorDataClassificationConfig: + "enabled": + "description": |- + Whether data classification is enabled. +github.com/databricks/databricks-sdk-go/service/catalog.MonitorDestination: + "email_addresses": + "description": |- + The list of email addresses to send the notification to. A maximum of 5 email addresses is supported. +github.com/databricks/databricks-sdk-go/service/catalog.MonitorInferenceLog: + "granularities": + "description": | + Granularities for aggregating data into time windows based on their timestamp. Currently the following static + granularities are supported: + {``"5 minutes"``, ``"30 minutes"``, ``"1 hour"``, ``"1 day"``, ``" week(s)"``, ``"1 month"``, ``"1 year"``}. + "label_col": + "description": |- + Optional column that contains the ground truth for the prediction. + "model_id_col": + "description": | + Column that contains the id of the model generating the predictions. Metrics will be computed per model id by + default, and also across all model ids. + "prediction_col": + "description": |- + Column that contains the output/prediction from the model. 
+ "prediction_proba_col": + "description": | + Optional column that contains the prediction probabilities for each class in a classification problem type. + The values in this column should be a map, mapping each class label to the prediction probability for a given + sample. The map should be of PySpark MapType(). + "problem_type": + "description": |- + Problem type the model aims to solve. Determines the type of model-quality metrics that will be computed. + "timestamp_col": + "description": | + Column that contains the timestamps of requests. The column must be one of the following: + - A ``TimestampType`` column + - A column whose values can be converted to timestamps through the pyspark + ``to_timestamp`` [function](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html). +github.com/databricks/databricks-sdk-go/service/catalog.MonitorInferenceLogProblemType: + "_": + "description": |- + Problem type the model aims to solve. Determines the type of model-quality metrics that will be computed. + "enum": + - |- + PROBLEM_TYPE_CLASSIFICATION + - |- + PROBLEM_TYPE_REGRESSION +github.com/databricks/databricks-sdk-go/service/catalog.MonitorMetric: + "definition": + "description": |- + Jinja template for a SQL expression that specifies how to compute the metric. See [create metric definition](https://docs.databricks.com/en/lakehouse-monitoring/custom-metrics.html#create-definition). + "input_columns": + "description": | + A list of column names in the input table the metric should be computed for. + Can use ``":table"`` to indicate that the metric needs information from multiple columns. + "name": + "description": |- + Name of the metric in the output tables. + "output_data_type": + "description": |- + The output type of the custom metric. + "type": + "description": | + Can only be one of ``"CUSTOM_METRIC_TYPE_AGGREGATE"``, ``"CUSTOM_METRIC_TYPE_DERIVED"``, or ``"CUSTOM_METRIC_TYPE_DRIFT"``. + The ``"CUSTOM_METRIC_TYPE_AGGREGATE"`` and ``"CUSTOM_METRIC_TYPE_DERIVED"`` metrics + are computed on a single table, whereas the ``"CUSTOM_METRIC_TYPE_DRIFT"`` compare metrics across + baseline and input table, or across the two consecutive time windows. + - CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table + - CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics + - CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics +github.com/databricks/databricks-sdk-go/service/catalog.MonitorMetricType: + "_": + "description": | + Can only be one of ``"CUSTOM_METRIC_TYPE_AGGREGATE"``, ``"CUSTOM_METRIC_TYPE_DERIVED"``, or ``"CUSTOM_METRIC_TYPE_DRIFT"``. + The ``"CUSTOM_METRIC_TYPE_AGGREGATE"`` and ``"CUSTOM_METRIC_TYPE_DERIVED"`` metrics + are computed on a single table, whereas the ``"CUSTOM_METRIC_TYPE_DRIFT"`` compare metrics across + baseline and input table, or across the two consecutive time windows. + - CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table + - CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics + - CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics + "enum": + - |- + CUSTOM_METRIC_TYPE_AGGREGATE + - |- + CUSTOM_METRIC_TYPE_DERIVED + - |- + CUSTOM_METRIC_TYPE_DRIFT +github.com/databricks/databricks-sdk-go/service/catalog.MonitorNotifications: + "on_failure": + "description": |- + Who to send notifications to on monitor failure. 
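+# A minimal sketch (metric name, column, and expression are illustrative) of a
+# custom metric entry using the MonitorMetric fields documented above:
+#
+#   custom_metrics:
+#     - name: avg_price
+#       type: CUSTOM_METRIC_TYPE_AGGREGATE
+#       input_columns:
+#         - price
+#       definition: avg({{input_column}})
+#       output_data_type: double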
+  "on_new_classification_tag_detected":
+    "description": |-
+      Who to send notifications to when new data classification tags are detected.
+github.com/databricks/databricks-sdk-go/service/catalog.MonitorSnapshot: {}
+github.com/databricks/databricks-sdk-go/service/catalog.MonitorTimeSeries:
+  "granularities":
+    "description": |
+      Granularities for aggregating data into time windows based on their timestamp. Currently the following static
+      granularities are supported:
+      {``"5 minutes"``, ``"30 minutes"``, ``"1 hour"``, ``"1 day"``, ``"<n> week(s)"``, ``"1 month"``, ``"1 year"``}.
+  "timestamp_col":
+    "description": |
+      Column that contains the timestamps of requests. The column must be one of the following:
+      - A ``TimestampType`` column
+      - A column whose values can be converted to timestamps through the pyspark
+        ``to_timestamp`` [function](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html).
+github.com/databricks/databricks-sdk-go/service/catalog.VolumeType:
+  "_":
+    "enum":
+      - |-
+        EXTERNAL
+      - |-
+        MANAGED
+github.com/databricks/databricks-sdk-go/service/compute.Adlsgen2Info:
+  "destination":
+    "description": |-
+      abfss destination, e.g. `abfss://<container-name>@<storage-account-name>.dfs.core.windows.net/<directory-name>`.
+github.com/databricks/databricks-sdk-go/service/compute.AutoScale:
+  "max_workers":
+    "description": |-
+      The maximum number of workers to which the cluster can scale up when overloaded.
+      Note that `max_workers` must be strictly greater than `min_workers`.
+  "min_workers":
+    "description": |-
+      The minimum number of workers to which the cluster can scale down when underutilized.
+      It is also the initial number of workers the cluster will have after creation.
+github.com/databricks/databricks-sdk-go/service/compute.AwsAttributes:
+  "availability": {}
+  "ebs_volume_count":
+    "description": |-
+      The number of volumes launched for each instance. Users can choose up to 10 volumes.
+      This feature is only enabled for supported node types. Legacy node types cannot specify
+      custom EBS volumes.
+      For node types with no instance store, at least one EBS volume needs to be specified;
+      otherwise, cluster creation will fail.
+
+      These EBS volumes will be mounted at `/ebs0`, `/ebs1`, etc.
+      Instance store volumes will be mounted at `/local_disk0`, `/local_disk1`, etc.
+
+      If EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for
+      scratch storage because heterogeneously sized scratch devices can lead to inefficient disk
+      utilization. If no EBS volumes are attached, Databricks will configure Spark to use instance
+      store volumes.
+
+      Please note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`
+      will be overridden.
+  "ebs_volume_iops":
+    "description": |-
+      If using gp3 volumes, what IOPS to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used.
+  "ebs_volume_size":
+    "description": |-
+      The size of each EBS volume (in GiB) launched for each instance. For general purpose
+      SSD, this value must be within the range 100 - 4096. For throughput optimized HDD,
+      this value must be within the range 500 - 4096.
+  "ebs_volume_throughput":
+    "description": |-
+      If using gp3 volumes, what throughput to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used.
+  "ebs_volume_type": {}
+  "first_on_demand":
+    "description": |-
+      The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.
+      If this value is greater than 0, the cluster driver node in particular will be placed on an
+      on-demand instance. If this value is greater than or equal to the current cluster size, all
+      nodes will be placed on on-demand instances. If this value is less than the current cluster
+      size, `first_on_demand` nodes will be placed on on-demand instances and the remainder will
+      be placed on `availability` instances. Note that this value does not affect
+      cluster size and cannot currently be mutated over the lifetime of a cluster.
+  "instance_profile_arn":
+    "description": |-
+      Nodes for this cluster will only be placed on AWS instances with this instance profile. If
+      omitted, nodes will be placed on instances without an IAM instance profile. The instance
+      profile must have previously been added to the Databricks environment by an account
+      administrator.
+
+      This feature may only be available to certain customer plans.
+
+      If this field is omitted, we will pull in the default from the conf if it exists.
+  "spot_bid_price_percent":
+    "description": |-
+      The bid price for AWS spot instances, as a percentage of the corresponding instance type's
+      on-demand price.
+      For example, if this field is set to 50, and the cluster needs a new `r3.xlarge` spot
+      instance, then the bid price is half of the price of
+      on-demand `r3.xlarge` instances. Similarly, if this field is set to 200, the bid price is twice
+      the price of on-demand `r3.xlarge` instances. If not specified, the default value is 100.
+      When spot instances are requested for this cluster, only spot instances whose bid price
+      percentage matches this field will be considered.
+      Note that, for safety, we enforce this field to be no more than 10000.
+
+      The default value and documentation here should be kept consistent with
+      CommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent.
+  "zone_id":
+    "description": |-
+      Identifier for the availability zone/datacenter in which the cluster resides.
+      This string will be of a form like "us-west-2a". The provided availability
+      zone must be in the same region as the Databricks deployment. For example, "us-west-2a"
+      is not a valid zone id if the Databricks deployment resides in the "us-east-1" region.
+      This is an optional field at cluster creation, and if not specified, a default zone will be used.
+      If the zone specified is "auto", will try to place cluster in a zone with high availability,
+      and will retry placement in a different AZ if there is not enough capacity.
+      The list of available zones as well as the default value can be found by using the
+      `List Zones` method.
+github.com/databricks/databricks-sdk-go/service/compute.AwsAvailability:
+  "_":
+    "description": |
+      Availability type used for all subsequent nodes past the `first_on_demand` ones.
+
+      Note: If `first_on_demand` is zero, this availability type will be used for the entire cluster.
+    "enum":
+      - |-
+        SPOT
+      - |-
+        ON_DEMAND
+      - |-
+        SPOT_WITH_FALLBACK
+github.com/databricks/databricks-sdk-go/service/compute.AzureAttributes:
+  "availability": {}
+  "first_on_demand":
+    "description": |-
+      The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.
+      This value should be greater than 0, to make sure the cluster driver node is placed on an
+      on-demand instance.
If this value is greater than or equal to the current cluster size, all + nodes will be placed on on-demand instances. If this value is less than the current cluster + size, `first_on_demand` nodes will be placed on on-demand instances and the remainder will + be placed on `availability` instances. Note that this value does not affect + cluster size and cannot currently be mutated over the lifetime of a cluster. + "log_analytics_info": + "description": |- + Defines values necessary to configure and run Azure Log Analytics agent + "spot_bid_max_price": + "description": |- + The max bid price to be used for Azure spot instances. + The Max price for the bid cannot be higher than the on-demand price of the instance. + If not specified, the default value is -1, which specifies that the instance cannot be evicted + on the basis of price, and only on the basis of availability. Further, the value should > 0 or -1. +github.com/databricks/databricks-sdk-go/service/compute.AzureAvailability: + "_": + "description": |- + Availability type used for all subsequent nodes past the `first_on_demand` ones. + Note: If `first_on_demand` is zero (which only happens on pool clusters), this availability + type will be used for the entire cluster. + "enum": + - |- + SPOT_AZURE + - |- + ON_DEMAND_AZURE + - |- + SPOT_WITH_FALLBACK_AZURE +github.com/databricks/databricks-sdk-go/service/compute.ClientsTypes: + "jobs": + "description": |- + With jobs set, the cluster can be used for jobs + "notebooks": + "description": |- + With notebooks set, this cluster can be used for notebooks +github.com/databricks/databricks-sdk-go/service/compute.ClusterLogConf: + "dbfs": + "description": |- + destination needs to be provided. e.g. + `{ "dbfs" : { "destination" : "dbfs:/home/cluster_log" } }` + "s3": + "description": |- + destination and either the region or endpoint need to be provided. e.g. + `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" : "us-west-2" } }` + Cluster iam role is used to access s3, please make sure the cluster iam role in + `instance_profile_arn` has permission to write data to the s3 destination. +github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec: + "apply_policy_default_values": + "description": |- + When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied. + "autoscale": + "description": |- + Parameters needed in order to automatically scale clusters up and down based on load. + Note: autoscaling works best with DB runtime versions 3.0 or later. + "autotermination_minutes": + "description": |- + Automatically terminates the cluster after it is inactive for this time in minutes. If not set, + this cluster will not be automatically terminated. If specified, the threshold must be between + 10 and 10000 minutes. + Users can also set this value to 0 to explicitly disable automatic termination. + "aws_attributes": + "description": |- + Attributes related to clusters running on Amazon Web Services. + If not specified at cluster creation, a set of default values will be used. + "azure_attributes": + "description": |- + Attributes related to clusters running on Microsoft Azure. + If not specified at cluster creation, a set of default values will be used. + "cluster_log_conf": + "description": |- + The configuration for delivering spark logs to a long-term storage destination. + Two kinds of destinations (dbfs and s3) are supported. 
Only one destination can be specified + for one cluster. If the conf is given, the logs will be delivered to the destination every + `5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while + the destination of executor logs is `$destination/$clusterId/executor`. + "cluster_name": + "description": | + Cluster name requested by the user. This doesn't have to be unique. + If not specified at creation, the cluster name will be an empty string. + "custom_tags": + "description": |- + Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS + instances and EBS volumes) with these tags in addition to `default_tags`. Notes: + + - Currently, Databricks allows at most 45 custom tags + + - Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags + "data_security_mode": {} + "docker_image": {} + "driver_instance_pool_id": + "description": |- + The optional ID of the instance pool for the driver of the cluster belongs. + The pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not + assigned. + "driver_node_type_id": + "description": | + The node type of the Spark driver. Note that this field is optional; + if unset, the driver node type will be set as the same value + as `node_type_id` defined above. + "enable_elastic_disk": + "description": |- + Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk + space when its Spark workers are running low on disk space. This feature requires specific AWS + permissions to function correctly - refer to the User Guide for more details. + "enable_local_disk_encryption": + "description": |- + Whether to enable LUKS on cluster VMs' local disks + "gcp_attributes": + "description": |- + Attributes related to clusters running on Google Cloud Platform. + If not specified at cluster creation, a set of default values will be used. + "init_scripts": + "description": |- + The configuration for storing init scripts. Any number of destinations can be specified. The scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified, init script logs are sent to `//init_scripts`. + "instance_pool_id": + "description": |- + The optional ID of the instance pool to which the cluster belongs. + "is_single_node": + "description": | + This field can only be used with `kind`. + + When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers` + "kind": {} + "node_type_id": + "description": | + This field encodes, through a single value, the resources available to each of + the Spark nodes in this cluster. For example, the Spark nodes can be provisioned + and optimized for memory or compute intensive workloads. A list of available node + types can be retrieved by using the :method:clusters/listNodeTypes API call. + "num_workers": + "description": |- + Number of worker nodes that this cluster should have. A cluster has one Spark Driver + and `num_workers` Executors for a total of `num_workers` + 1 Spark nodes. + + Note: When reading the properties of a cluster, this field reflects the desired number + of workers rather than the actual current number of workers. For instance, if a cluster + is resized from 5 to 10 workers, this field will immediately be updated to reflect + the target size of 10 workers, whereas the workers listed in `spark_info` will gradually + increase from 5 to 10 as the new nodes are provisioned. 
+ "policy_id": + "description": |- + The ID of the cluster policy used to create the cluster if applicable. + "runtime_engine": {} + "single_user_name": + "description": |- + Single user name if data_security_mode is `SINGLE_USER` + "spark_conf": + "description": | + An object containing a set of optional, user-specified Spark configuration key-value pairs. + Users can also pass in a string of extra JVM options to the driver and the executors via + `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively. + "spark_env_vars": + "description": |- + An object containing a set of optional, user-specified environment variable key-value pairs. + Please note that key-value pair of the form (X,Y) will be exported as is (i.e., + `export X='Y'`) while launching the driver and workers. + + In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending + them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all + default databricks managed environmental variables are included as well. + + Example Spark environment variables: + `{"SPARK_WORKER_MEMORY": "28000m", "SPARK_LOCAL_DIRS": "/local_disk0"}` or + `{"SPARK_DAEMON_JAVA_OPTS": "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}` + "spark_version": + "description": | + The Spark version of the cluster, e.g. `3.3.x-scala2.11`. + A list of available Spark versions can be retrieved by using + the :method:clusters/sparkVersions API call. + "ssh_public_keys": + "description": |- + SSH public key contents that will be added to each Spark node in this cluster. The + corresponding private keys can be used to login with the user name `ubuntu` on port `2200`. + Up to 10 keys can be specified. + "use_ml_runtime": + "description": | + This field can only be used with `kind`. + + `effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not. + "workload_type": {} +github.com/databricks/databricks-sdk-go/service/compute.DataSecurityMode: + "_": + "description": | + Data security mode decides what data governance model to use when accessing data + from a cluster. + + The following modes can only be used with `kind`. + * `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration. + * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. + * `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`. + + The following modes can be used regardless of `kind`. + * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode. + * `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode. + * `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. But programming languages and cluster features might be limited. + + The following modes are deprecated starting with Databricks Runtime 15.0 and + will be removed for future Databricks Runtime versions: + + * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. 
+ * `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency clusters. + * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters. + * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled. + "enum": + - |- + DATA_SECURITY_MODE_AUTO + - |- + DATA_SECURITY_MODE_STANDARD + - |- + DATA_SECURITY_MODE_DEDICATED + - |- + NONE + - |- + SINGLE_USER + - |- + USER_ISOLATION + - |- + LEGACY_TABLE_ACL + - |- + LEGACY_PASSTHROUGH + - |- + LEGACY_SINGLE_USER + - |- + LEGACY_SINGLE_USER_STANDARD +github.com/databricks/databricks-sdk-go/service/compute.DbfsStorageInfo: + "destination": + "description": |- + dbfs destination, e.g. `dbfs:/my/path` +github.com/databricks/databricks-sdk-go/service/compute.DockerBasicAuth: + "password": + "description": |- + Password of the user + "username": + "description": |- + Name of the user +github.com/databricks/databricks-sdk-go/service/compute.DockerImage: + "basic_auth": {} + "url": + "description": |- + URL of the docker image. +github.com/databricks/databricks-sdk-go/service/compute.EbsVolumeType: + "_": + "description": |- + The type of EBS volumes that will be launched with this cluster. + "enum": + - |- + GENERAL_PURPOSE_SSD + - |- + THROUGHPUT_OPTIMIZED_HDD +github.com/databricks/databricks-sdk-go/service/compute.Environment: + "_": + "description": |- + The environment entity used to preserve serverless environment side panel and jobs' environment for non-notebook task. + In this minimal environment spec, only pip dependencies are supported. + "client": + "description": |- + Client version used by the environment + The client is the user-facing environment of the runtime. + Each client comes with a specific set of pre-installed libraries. + The version is a string, consisting of the major client version. + "dependencies": + "description": |- + List of pip dependencies, as supported by the version of pip in this environment. + Each dependency is a pip requirement file line https://pip.pypa.io/en/stable/reference/requirements-file-format/ + Allowed dependency could be , , (WSFS or Volumes in Databricks), + E.g. dependencies: ["foo==0.0.1", "-r /Workspace/test/requirements.txt"] +github.com/databricks/databricks-sdk-go/service/compute.GcpAttributes: + "availability": {} + "boot_disk_size": + "description": |- + boot disk size in GB + "google_service_account": + "description": |- + If provided, the cluster will impersonate the google service account when accessing + gcloud services (like GCS). The google service account + must have previously been added to the Databricks environment by an account + administrator. + "local_ssd_count": + "description": |- + If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type. + "use_preemptible_executors": + "description": |- + This field determines whether the spark executors will be scheduled to run on preemptible VMs (when set to true) versus standard compute engine VMs (when set to false; default). + Note: Soon to be deprecated, use the availability field instead. + "zone_id": + "description": |- + Identifier for the availability zone in which the cluster resides. 
+ This can be one of the following: + - "HA" => High availability, spread nodes across availability zones for a Databricks deployment region [default] + - "AUTO" => Databricks picks an availability zone to schedule the cluster on. + - A GCP availability zone => Pick One of the available zones for (machine type + region) from https://cloud.google.com/compute/docs/regions-zones. +github.com/databricks/databricks-sdk-go/service/compute.GcpAvailability: + "_": + "description": |- + This field determines whether the instance pool will contain preemptible + VMs, on-demand VMs, or preemptible VMs with a fallback to on-demand VMs if the former is unavailable. + "enum": + - |- + PREEMPTIBLE_GCP + - |- + ON_DEMAND_GCP + - |- + PREEMPTIBLE_WITH_FALLBACK_GCP +github.com/databricks/databricks-sdk-go/service/compute.GcsStorageInfo: + "destination": + "description": |- + GCS destination/URI, e.g. `gs://my-bucket/some-prefix` +github.com/databricks/databricks-sdk-go/service/compute.InitScriptInfo: + "abfss": + "description": |- + destination needs to be provided. e.g. + `{ "abfss" : { "destination" : "abfss://@.dfs.core.windows.net/" } } + "dbfs": + "description": |- + destination needs to be provided. e.g. + `{ "dbfs" : { "destination" : "dbfs:/home/cluster_log" } }` + "file": + "description": |- + destination needs to be provided. e.g. + `{ "file" : { "destination" : "file:/my/local/file.sh" } }` + "gcs": + "description": |- + destination needs to be provided. e.g. + `{ "gcs": { "destination": "gs://my-bucket/file.sh" } }` + "s3": + "description": |- + destination and either the region or endpoint need to be provided. e.g. + `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" : "us-west-2" } }` + Cluster iam role is used to access s3, please make sure the cluster iam role in + `instance_profile_arn` has permission to write data to the s3 destination. + "volumes": + "description": |- + destination needs to be provided. e.g. + `{ "volumes" : { "destination" : "/Volumes/my-init.sh" } }` + "workspace": + "description": |- + destination needs to be provided. e.g. + `{ "workspace" : { "destination" : "/Users/user1@databricks.com/my-init.sh" } }` +github.com/databricks/databricks-sdk-go/service/compute.Library: + "cran": + "description": |- + Specification of a CRAN library to be installed as part of the library + "egg": + "description": |- + Deprecated. URI of the egg library to install. Installing Python egg files is deprecated and is not supported in Databricks Runtime 14.0 and above. + "jar": + "description": |- + URI of the JAR library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs. + For example: `{ "jar": "/Workspace/path/to/library.jar" }`, `{ "jar" : "/Volumes/path/to/library.jar" }` or + `{ "jar": "s3://my-bucket/library.jar" }`. + If S3 is used, please make sure the cluster has read access on the library. You may need to + launch the cluster with an IAM role to access the S3 URI. + "maven": + "description": |- + Specification of a maven library to be installed. For example: + `{ "coordinates": "org.jsoup:jsoup:1.7.2" }` + "pypi": + "description": |- + Specification of a PyPi library to be installed. For example: + `{ "package": "simplejson" }` + "requirements": + "description": |- + URI of the requirements.txt file to install. Only Workspace paths and Unity Catalog Volumes paths are supported. 
+ For example: `{ "requirements": "/Workspace/path/to/requirements.txt" }` or `{ "requirements" : "/Volumes/path/to/requirements.txt" }` + "whl": + "description": |- + URI of the wheel library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs. + For example: `{ "whl": "/Workspace/path/to/library.whl" }`, `{ "whl" : "/Volumes/path/to/library.whl" }` or + `{ "whl": "s3://my-bucket/library.whl" }`. + If S3 is used, please make sure the cluster has read access on the library. You may need to + launch the cluster with an IAM role to access the S3 URI. +github.com/databricks/databricks-sdk-go/service/compute.LocalFileInfo: + "destination": + "description": |- + local file destination, e.g. `file:/my/local/file.sh` +github.com/databricks/databricks-sdk-go/service/compute.LogAnalyticsInfo: + "log_analytics_primary_key": + "description": |- + + "log_analytics_workspace_id": + "description": |- + +github.com/databricks/databricks-sdk-go/service/compute.MavenLibrary: + "coordinates": + "description": |- + Gradle-style maven coordinates. For example: "org.jsoup:jsoup:1.7.2". + "exclusions": + "description": |- + List of dependences to exclude. For example: `["slf4j:slf4j", "*:hadoop-client"]`. + + Maven dependency exclusions: + https://maven.apache.org/guides/introduction/introduction-to-optional-and-excludes-dependencies.html. + "repo": + "description": |- + Maven repo to install the Maven package from. If omitted, both Maven Central Repository + and Spark Packages are searched. +github.com/databricks/databricks-sdk-go/service/compute.PythonPyPiLibrary: + "package": + "description": |- + The name of the pypi package to install. An optional exact version specification is also + supported. Examples: "simplejson" and "simplejson==3.8.0". + "repo": + "description": |- + The repository where the package can be found. If not specified, the default pip index is + used. +github.com/databricks/databricks-sdk-go/service/compute.RCranLibrary: + "package": + "description": |- + The name of the CRAN package to install. + "repo": + "description": |- + The repository where the package can be found. If not specified, the default CRAN repo is used. +github.com/databricks/databricks-sdk-go/service/compute.RuntimeEngine: + "_": + "description": | + Determines the cluster's runtime engine, either standard or Photon. + + This field is not compatible with legacy `spark_version` values that contain `-photon-`. + Remove `-photon-` from the `spark_version` and set `runtime_engine` to `PHOTON`. + + If left unspecified, the runtime engine defaults to standard unless the spark_version + contains -photon-, in which case Photon will be used. + "enum": + - |- + NULL + - |- + STANDARD + - |- + PHOTON +github.com/databricks/databricks-sdk-go/service/compute.S3StorageInfo: + "canned_acl": + "description": |- + (Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`. + If `canned_cal` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on + the destination bucket and prefix. The full list of possible canned acl can be found at + http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl. + Please also note that by default only the object owner gets full controls. If you are using cross account + role for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to + read the logs. + "destination": + "description": |- + S3 destination, e.g. 
`s3://my-bucket/some-prefix` Note that logs will be delivered using + cluster iam role, please make sure you set cluster iam role and the role has write access to the + destination. Please also note that you cannot use AWS keys to deliver logs. + "enable_encryption": + "description": |- + (Optional) Flag to enable server side encryption, `false` by default. + "encryption_type": + "description": |- + (Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when + encryption is enabled and the default type is `sse-s3`. + "endpoint": + "description": |- + S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set. + If both are set, endpoint will be used. + "kms_key": + "description": |- + (Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`. + "region": + "description": |- + S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set, + endpoint will be used. +github.com/databricks/databricks-sdk-go/service/compute.VolumesStorageInfo: + "destination": + "description": |- + Unity Catalog Volumes file destination, e.g. `/Volumes/my-init.sh` +github.com/databricks/databricks-sdk-go/service/compute.WorkloadType: + "clients": + "description": |2- + defined what type of clients can use the cluster. E.g. Notebooks, Jobs +github.com/databricks/databricks-sdk-go/service/compute.WorkspaceStorageInfo: + "destination": + "description": |- + workspace files destination, e.g. `/Users/user1@databricks.com/my-init.sh` +github.com/databricks/databricks-sdk-go/service/dashboards.LifecycleState: + "_": + "enum": + - |- + ACTIVE + - |- + TRASHED +github.com/databricks/databricks-sdk-go/service/jobs.CleanRoomsNotebookTask: + "clean_room_name": + "description": |- + The clean room that the notebook belongs to. + "etag": + "description": |- + Checksum to validate the freshness of the notebook resource (i.e. the notebook being run is the latest version). + It can be fetched by calling the :method:cleanroomassets/get API. + "notebook_base_parameters": + "description": |- + Base parameters to be used for the clean room notebook job. + "notebook_name": + "description": |- + Name of the notebook being run. +github.com/databricks/databricks-sdk-go/service/jobs.Condition: + "_": + "enum": + - |- + ANY_UPDATED + - |- + ALL_UPDATED +github.com/databricks/databricks-sdk-go/service/jobs.ConditionTask: + "left": + "description": |- + The left operand of the condition task. Can be either a string value or a job state or parameter reference. + "op": + "description": |- + * `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`. + * `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” >= “12”` will evaluate to `true`, `“10.0” >= “12”` will evaluate to `false`. + + The boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison. + "right": + "description": |- + The right operand of the condition task. Can be either a string value or a job state or parameter reference. +github.com/databricks/databricks-sdk-go/service/jobs.ConditionTaskOp: + "_": + "description": |- + * `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. 
This means that `“12.0” == “12”` will evaluate to `false`. + * `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” >= “12”` will evaluate to `true`, `“10.0” >= “12”` will evaluate to `false`. + + The boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison. + "enum": + - |- + EQUAL_TO + - |- + GREATER_THAN + - |- + GREATER_THAN_OR_EQUAL + - |- + LESS_THAN + - |- + LESS_THAN_OR_EQUAL + - |- + NOT_EQUAL +github.com/databricks/databricks-sdk-go/service/jobs.Continuous: + "pause_status": + "description": |- + Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED. +github.com/databricks/databricks-sdk-go/service/jobs.CronSchedule: + "pause_status": + "description": |- + Indicate whether this schedule is paused or not. + "quartz_cron_expression": + "description": |- + A Cron expression using Quartz syntax that describes the schedule for a job. See [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html) for details. This field is required. + "timezone_id": + "description": |- + A Java timezone ID. The schedule for a job is resolved with respect to this timezone. See [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details. This field is required. +github.com/databricks/databricks-sdk-go/service/jobs.DbtTask: + "catalog": + "description": |- + Optional name of the catalog to use. The value is the top level in the 3-level namespace of Unity Catalog (catalog / schema / relation). The catalog value can only be specified if a warehouse_id is specified. Requires dbt-databricks >= 1.1.1. + "commands": + "description": |- + A list of dbt commands to execute. All commands must start with `dbt`. This parameter must not be empty. A maximum of up to 10 commands can be provided. + "profiles_directory": + "description": |- + Optional (relative) path to the profiles directory. Can only be specified if no warehouse_id is specified. If no warehouse_id is specified and this folder is unset, the root directory is used. + "project_directory": + "description": |- + Path to the project directory. Optional for Git sourced tasks, in which + case if no value is provided, the root of the Git repository is used. + "schema": + "description": |- + Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used. + "source": + "description": |- + Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved + from the local Databricks workspace. When set to `GIT`, the project will be retrieved from a Git repository + defined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise. + + * `WORKSPACE`: Project is located in Databricks workspace. + * `GIT`: Project is located in cloud Git provider. + "warehouse_id": + "description": |- + ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument. 
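For orientation only: in a Databricks Asset Bundle, the dbt task fields documented above live under a task's `dbt_task` block. A minimal hedged sketch follows; the job name, task key, and warehouse ID are illustrative placeholders, not values taken from this change.

resources:
  jobs:
    dbt_example_job:                          # hypothetical job name
      name: dbt_example_job
      tasks:
        - task_key: dbt
          dbt_task:
            commands:                         # every command must start with `dbt`
              - "dbt deps"
              - "dbt run"
            warehouse_id: "<sql-warehouse-id>"  # placeholder; the dbt profile and connection details are generated from it
            # catalog/schema are optional and only take effect when a warehouse_id is provided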
+github.com/databricks/databricks-sdk-go/service/jobs.FileArrivalTriggerConfiguration: + "min_time_between_triggers_seconds": + "description": |- + If set, the trigger starts a run only after the specified amount of time passed since + the last time the trigger fired. The minimum allowed value is 60 seconds + "url": + "description": |- + URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location. + "wait_after_last_change_seconds": + "description": |- + If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. + This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The + minimum allowed value is 60 seconds. +github.com/databricks/databricks-sdk-go/service/jobs.ForEachTask: + "concurrency": + "description": |- + An optional maximum allowed number of concurrent runs of the task. + Set this value if you want to be able to execute multiple runs of the task concurrently. + "inputs": + "description": |- + Array for task to iterate on. This can be a JSON string or a reference to + an array parameter. + "task": + "description": |- + Configuration for the task that will be run for each element in the array +github.com/databricks/databricks-sdk-go/service/jobs.Format: + "_": + "enum": + - |- + SINGLE_TASK + - |- + MULTI_TASK +github.com/databricks/databricks-sdk-go/service/jobs.GitProvider: + "_": + "enum": + - |- + gitHub + - |- + bitbucketCloud + - |- + azureDevOpsServices + - |- + gitHubEnterprise + - |- + bitbucketServer + - |- + gitLab + - |- + gitLabEnterpriseEdition + - |- + awsCodeCommit +github.com/databricks/databricks-sdk-go/service/jobs.GitSnapshot: + "_": + "description": |- + Read-only state of the remote repository at the time the job was run. This field is only included on job runs. + "used_commit": + "description": |- + Commit that was used to execute the run. If git_branch was specified, this points to the HEAD of the branch at the time of the run; if git_tag was specified, this points to the commit the tag points to. +github.com/databricks/databricks-sdk-go/service/jobs.GitSource: + "_": + "description": |- + An optional specification for a remote Git repository containing the source code used by tasks. Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks. + + If `git_source` is set, these tasks retrieve the file from the remote repository by default. However, this behavior can be overridden by setting `source` to `WORKSPACE` on the task. + + Note: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are used, `git_source` must be defined on the job. + "git_branch": + "description": |- + Name of the branch to be checked out and used by this job. This field cannot be specified in conjunction with git_tag or git_commit. + "git_commit": + "description": |- + Commit to be checked out and used by this job. This field cannot be specified in conjunction with git_branch or git_tag. + "git_provider": + "description": |- + Unique identifier of the service used to host the Git repository. The value is case insensitive. + "git_snapshot": {} + "git_tag": + "description": |- + Name of the tag to be checked out and used by this job. This field cannot be specified in conjunction with git_branch or git_commit. + "git_url": + "description": |- + URL of the repository to be cloned by this job. 
+ "job_source": + "description": |- + The source of the job specification in the remote repository when the job is source controlled. +github.com/databricks/databricks-sdk-go/service/jobs.JobCluster: + "job_cluster_key": + "description": |- + A unique name for the job cluster. This field is required and must be unique within the job. + `JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution. + "new_cluster": + "description": |- + If new_cluster, a description of a cluster that is created for each task. +github.com/databricks/databricks-sdk-go/service/jobs.JobDeployment: + "kind": + "description": |- + The kind of deployment that manages the job. + + * `BUNDLE`: The job is managed by Databricks Asset Bundle. + "metadata_file_path": + "description": |- + Path of the file that contains deployment metadata. +github.com/databricks/databricks-sdk-go/service/jobs.JobDeploymentKind: + "_": + "description": |- + * `BUNDLE`: The job is managed by Databricks Asset Bundle. + "enum": + - |- + BUNDLE +github.com/databricks/databricks-sdk-go/service/jobs.JobEditMode: + "_": + "description": |- + Edit mode of the job. + + * `UI_LOCKED`: The job is in a locked UI state and cannot be modified. + * `EDITABLE`: The job is in an editable state and can be modified. + "enum": + - |- + UI_LOCKED + - |- + EDITABLE +github.com/databricks/databricks-sdk-go/service/jobs.JobEmailNotifications: + "no_alert_for_skipped_runs": + "description": |- + If true, do not send email to recipients specified in `on_failure` if the run is skipped. + This field is `deprecated`. Please use the `notification_settings.no_alert_for_skipped_runs` field. + "on_duration_warning_threshold_exceeded": + "description": |- + A list of email addresses to be notified when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is specified in the `health` field for the job, notifications are not sent. + "on_failure": + "description": |- + A list of email addresses to be notified when a run unsuccessfully completes. A run is considered to have completed unsuccessfully if it ends with an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED`, or `TIMED_OUT` result_state. If this is not specified on job creation, reset, or update the list is empty, and notifications are not sent. + "on_start": + "description": |- + A list of email addresses to be notified when a run begins. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent. + "on_streaming_backlog_exceeded": + "description": |- + A list of email addresses to notify when any streaming backlog thresholds are exceeded for any stream. + Streaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`. + Alerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes. + "on_success": + "description": |- + A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent. 
+github.com/databricks/databricks-sdk-go/service/jobs.JobEnvironment: + "environment_key": + "description": |- + The key of an environment. It has to be unique within a job. + "spec": {} +github.com/databricks/databricks-sdk-go/service/jobs.JobNotificationSettings: + "no_alert_for_canceled_runs": + "description": |- + If true, do not send notifications to recipients specified in `on_failure` if the run is canceled. + "no_alert_for_skipped_runs": + "description": |- + If true, do not send notifications to recipients specified in `on_failure` if the run is skipped. +github.com/databricks/databricks-sdk-go/service/jobs.JobParameterDefinition: + "default": + "description": |- + Default value of the parameter. + "name": + "description": |- + The name of the defined parameter. May only contain alphanumeric characters, `_`, `-`, and `.` +github.com/databricks/databricks-sdk-go/service/jobs.JobRunAs: + "_": + "description": |- + Write-only setting. Specifies the user or service principal that the job runs as. If not specified, the job runs as the user who created the job. + + Either `user_name` or `service_principal_name` should be specified. If not, an error is thrown. + "service_principal_name": + "description": |- + Application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role. + "user_name": + "description": |- + The email of an active workspace user. Non-admin users can only set this field to their own email. +github.com/databricks/databricks-sdk-go/service/jobs.JobSource: + "_": + "description": |- + The source of the job specification in the remote repository when the job is source controlled. + "dirty_state": + "description": |- + Dirty state indicates the job is not fully synced with the job specification in the remote repository. + + Possible values are: + * `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced. + * `DISCONNECTED`: The job is temporarily disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from UI to make the job fully synced. + "import_from_git_branch": + "description": |- + Name of the branch which the job is imported from. + "job_config_path": + "description": |- + Path of the job YAML file that contains the job specification. +github.com/databricks/databricks-sdk-go/service/jobs.JobSourceDirtyState: + "_": + "description": |- + Dirty state indicates the job is not fully synced with the job specification + in the remote repository. + + Possible values are: + * `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced. + * `DISCONNECTED`: The job is temporarily disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from UI to make the job fully synced. + "enum": + - |- + NOT_SYNCED + - |- + DISCONNECTED +github.com/databricks/databricks-sdk-go/service/jobs.JobsHealthMetric: + "_": + "description": |- + Specifies the health metric that is being evaluated for a particular health rule. + + * `RUN_DURATION_SECONDS`: Expected total time for a run in seconds. + * `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric is in Public Preview. + * `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all streams.
This metric is in Public Preview. + * `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This metric is in Public Preview. + * `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all streams. This metric is in Public Preview. + "enum": + - |- + RUN_DURATION_SECONDS + - |- + STREAMING_BACKLOG_BYTES + - |- + STREAMING_BACKLOG_RECORDS + - |- + STREAMING_BACKLOG_SECONDS + - |- + STREAMING_BACKLOG_FILES +github.com/databricks/databricks-sdk-go/service/jobs.JobsHealthOperator: + "_": + "description": |- + Specifies the operator used to compare the health metric value with the specified threshold. + "enum": + - |- + GREATER_THAN +github.com/databricks/databricks-sdk-go/service/jobs.JobsHealthRule: + "metric": {} + "op": {} + "value": + "description": |- + Specifies the threshold value that the health metric should obey to satisfy the health rule. +github.com/databricks/databricks-sdk-go/service/jobs.JobsHealthRules: + "_": + "description": |- + An optional set of health rules that can be defined for this job. + "rules": {} +github.com/databricks/databricks-sdk-go/service/jobs.NotebookTask: + "base_parameters": + "description": |- + Base parameters to be used for each run of this job. If the run is initiated by a call to :method:jobs/run + Now with parameters specified, the two parameters maps are merged. If the same key is specified in + `base_parameters` and in `run-now`, the value from `run-now` is used. + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. + + If the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters, + the default value from the notebook is used. + + Retrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets). + + The JSON representation of this field cannot exceed 1MB. + "notebook_path": + "description": |- + The path of the notebook to be run in the Databricks workspace or remote repository. + For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. + For notebooks stored in a remote repository, the path must be relative. This field is required. + "source": + "description": |- + Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository + defined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise. + * `WORKSPACE`: Notebook is located in Databricks workspace. + * `GIT`: Notebook is located in cloud Git provider. + "warehouse_id": + "description": |- + Optional `warehouse_id` to run the notebook on a SQL warehouse. Classic SQL warehouses are NOT supported, please use serverless or pro SQL warehouses. + + Note that SQL warehouses only support SQL cells; if the notebook contains non-SQL cells, the run will fail. +github.com/databricks/databricks-sdk-go/service/jobs.PauseStatus: + "_": + "enum": + - |- + UNPAUSED + - |- + PAUSED +github.com/databricks/databricks-sdk-go/service/jobs.PeriodicTriggerConfiguration: + "interval": + "description": |- + The interval at which the trigger should run. + "unit": + "description": |- + The unit of time for the interval. 
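For context, the two fields above are all a periodic trigger consists of; a hedged sketch of the corresponding job settings (values are illustrative, valid time units are listed in the enum that follows):

trigger:
  pause_status: UNPAUSED       # see PauseStatus above
  periodic:
    interval: 1                # fire every 1 <unit>
    unit: HOURS                # HOURS, DAYS, or WEEKS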
+github.com/databricks/databricks-sdk-go/service/jobs.PeriodicTriggerConfigurationTimeUnit: + "_": + "enum": + - |- + HOURS + - |- + DAYS + - |- + WEEKS +github.com/databricks/databricks-sdk-go/service/jobs.PipelineParams: + "full_refresh": + "description": |- + If true, triggers a full refresh on the delta live table. +github.com/databricks/databricks-sdk-go/service/jobs.PipelineTask: + "full_refresh": + "description": |- + If true, triggers a full refresh on the delta live table. + "pipeline_id": + "description": |- + The full name of the pipeline task to execute. +github.com/databricks/databricks-sdk-go/service/jobs.PythonWheelTask: + "entry_point": + "description": |- + Named entry point to use, if it does not exist in the metadata of the package it executes the function from the package directly using `$packageName.$entryPoint()` + "named_parameters": + "description": |- + Command-line parameters passed to Python wheel task in the form of `["--name=task", "--data=dbfs:/path/to/data.json"]`. Leave it empty if `parameters` is not null. + "package_name": + "description": |- + Name of the package to execute + "parameters": + "description": |- + Command-line parameters passed to Python wheel task. Leave it empty if `named_parameters` is not null. +github.com/databricks/databricks-sdk-go/service/jobs.QueueSettings: + "enabled": + "description": |- + If true, enable queueing for the job. This is a required field. +github.com/databricks/databricks-sdk-go/service/jobs.RunIf: + "_": + "description": |- + An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`. + + Possible values are: + * `ALL_SUCCESS`: All dependencies have executed and succeeded + * `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded + * `NONE_FAILED`: None of the dependencies have failed and at least one was executed + * `ALL_DONE`: All dependencies have been completed + * `AT_LEAST_ONE_FAILED`: At least one dependency failed + * `ALL_FAILED`: All dependencies have failed + "enum": + - |- + ALL_SUCCESS + - |- + ALL_DONE + - |- + NONE_FAILED + - |- + AT_LEAST_ONE_SUCCESS + - |- + ALL_FAILED + - |- + AT_LEAST_ONE_FAILED +github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask: + "dbt_commands": + "description": |- + An array of commands to execute for jobs with the dbt task, for example `"dbt_commands": ["dbt deps", "dbt seed", "dbt deps", "dbt seed", "dbt run"]` + "jar_params": + "description": |- + A list of parameters for jobs with Spark JAR tasks, for example `"jar_params": ["john doe", "35"]`. + The parameters are used to invoke the main function of the main class specified in the Spark JAR task. + If not specified upon `run-now`, it defaults to an empty list. + jar_params cannot be specified in conjunction with notebook_params. + The JSON representation of this field (for example `{"jar_params":["john doe","35"]}`) cannot exceed 10,000 bytes. + + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. + "job_id": + "description": |- + ID of the job to trigger. + "job_parameters": + "description": |- + Job-level parameters used to trigger the job. + "notebook_params": + "description": |- + A map from keys to values for jobs with notebook task, for example `"notebook_params": {"name": "john doe", "age": "35"}`.
+ The map is passed to the notebook and is accessible through the [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html) function. + + If not specified upon `run-now`, the triggered run uses the job’s base parameters. + + notebook_params cannot be specified in conjunction with jar_params. + + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. + + The JSON representation of this field (for example `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed 10,000 bytes. + "pipeline_params": + "description": |- + Controls whether the pipeline should perform a full refresh + "python_named_params": {} + "python_params": + "description": |- + A list of parameters for jobs with Python tasks, for example `"python_params": ["john doe", "35"]`. + The parameters are passed to Python file as command-line parameters. If specified upon `run-now`, it would overwrite + the parameters specified in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) + cannot exceed 10,000 bytes. + + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. + + Important + + These parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error. + Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis. + "spark_submit_params": + "description": |- + A list of parameters for jobs with spark submit task, for example `"spark_submit_params": ["--class", "org.apache.spark.examples.SparkPi"]`. + The parameters are passed to spark-submit script as command-line parameters. If specified upon `run-now`, it would overwrite the + parameters specified in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) + cannot exceed 10,000 bytes. + + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs + + Important + + These parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error. + Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis. + "sql_params": + "description": |- + A map from keys to values for jobs with SQL task, for example `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task does not support custom parameters. +github.com/databricks/databricks-sdk-go/service/jobs.Source: + "_": + "description": |- + Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\ + from the local Databricks workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository + defined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise. + + * `WORKSPACE`: SQL file is located in Databricks workspace. + * `GIT`: SQL file is located in cloud Git provider. + "enum": + - |- + WORKSPACE + - |- + GIT +github.com/databricks/databricks-sdk-go/service/jobs.SparkJarTask: + "jar_uri": + "description": |- + Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create. + "main_class_name": + "description": |- + The full name of the class containing the main method to be executed. 
This class must be contained in a JAR provided as a library. + + The code must use `SparkContext.getOrCreate` to obtain a Spark context; otherwise, runs of the job fail. + "parameters": + "description": |- + Parameters passed to the main method. + + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. +github.com/databricks/databricks-sdk-go/service/jobs.SparkPythonTask: + "parameters": + "description": |- + Command line parameters passed to the Python file. + + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. + "python_file": + "description": |- + The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required. + "source": + "description": |- + Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local + Databricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`, + the Python file will be retrieved from a Git repository defined in `git_source`. + + * `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI. + * `GIT`: The Python file is located in a remote Git repository. +github.com/databricks/databricks-sdk-go/service/jobs.SparkSubmitTask: + "parameters": + "description": |- + Command-line parameters passed to spark submit. + + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. +github.com/databricks/databricks-sdk-go/service/jobs.SqlTask: + "alert": + "description": |- + If alert, indicates that this job must refresh a SQL alert. + "dashboard": + "description": |- + If dashboard, indicates that this job must refresh a SQL dashboard. + "file": + "description": |- + If file, indicates that this job runs a SQL file in a remote Git repository. + "parameters": + "description": |- + Parameters to be used for each run of this job. The SQL alert task does not support custom parameters. + "query": + "description": |- + If query, indicates that this job must execute a SQL query. + "warehouse_id": + "description": |- + The canonical identifier of the SQL warehouse. Recommended to use with serverless or pro SQL warehouses. Classic SQL warehouses are only supported for SQL alert, dashboard and query tasks and are limited to scheduled single-task jobs. +github.com/databricks/databricks-sdk-go/service/jobs.SqlTaskAlert: + "alert_id": + "description": |- + The canonical identifier of the SQL alert. + "pause_subscriptions": + "description": |- + If true, the alert notifications are not sent to subscribers. + "subscriptions": + "description": |- + If specified, alert notifications are sent to subscribers. +github.com/databricks/databricks-sdk-go/service/jobs.SqlTaskDashboard: + "custom_subject": + "description": |- + Subject of the email sent to subscribers of this task. + "dashboard_id": + "description": |- + The canonical identifier of the SQL dashboard. + "pause_subscriptions": + "description": |- + If true, the dashboard snapshot is not taken, and emails are not sent to subscribers. 
+ "subscriptions": + "description": |- + If specified, dashboard snapshots are sent to subscriptions. +github.com/databricks/databricks-sdk-go/service/jobs.SqlTaskFile: + "path": + "description": |- + Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths. + "source": + "description": |- + Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved + from the local Databricks workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository + defined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise. + + * `WORKSPACE`: SQL file is located in Databricks workspace. + * `GIT`: SQL file is located in cloud Git provider. +github.com/databricks/databricks-sdk-go/service/jobs.SqlTaskQuery: + "query_id": + "description": |- + The canonical identifier of the SQL query. +github.com/databricks/databricks-sdk-go/service/jobs.SqlTaskSubscription: + "destination_id": + "description": |- + The canonical identifier of the destination to receive email notification. This parameter is mutually exclusive with user_name. You cannot set both destination_id and user_name for subscription notifications. + "user_name": + "description": |- + The user name to receive the subscription email. This parameter is mutually exclusive with destination_id. You cannot set both destination_id and user_name for subscription notifications. +github.com/databricks/databricks-sdk-go/service/jobs.TableUpdateTriggerConfiguration: + "condition": + "description": |- + The table(s) condition based on which to trigger a job run. + "min_time_between_triggers_seconds": + "description": |- + If set, the trigger starts a run only after the specified amount of time has passed since + the last time the trigger fired. The minimum allowed value is 60 seconds. + "table_names": + "description": |- + A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`. + "wait_after_last_change_seconds": + "description": |- + If set, the trigger starts a run only after no table updates have occurred for the specified time + and can be used to wait for a series of table updates before triggering a run. The + minimum allowed value is 60 seconds. +github.com/databricks/databricks-sdk-go/service/jobs.Task: + "clean_rooms_notebook_task": + "description": |- + The task runs a [clean rooms](https://docs.databricks.com/en/clean-rooms/index.html) notebook + when the `clean_rooms_notebook_task` field is present. + "condition_task": + "description": |- + The task evaluates a condition that can be used to control the execution of other tasks when the `condition_task` field is present. + The condition task does not require a cluster to execute and does not support retries or notifications. + "dbt_task": + "description": |- + The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse. + "depends_on": + "description": |- + An optional array of objects specifying the dependency graph of the task. All tasks specified in this field must complete before executing this task. The task will run only if the `run_if` condition is true. + The key is `task_key`, and the value is the name assigned to the dependent task. + "description": + "description": |- + An optional description for this task. 
+ "disable_auto_optimization": + "description": |- + An option to disable auto optimization in serverless + "email_notifications": + "description": |- + An optional set of email addresses that is notified when runs of this task begin or complete as well as when this task is deleted. The default behavior is to not send any emails. + "environment_key": + "description": |- + The key that references an environment spec in a job. This field is required for Python script, Python wheel and dbt tasks when using serverless compute. + "existing_cluster_id": + "description": |- + If existing_cluster_id, the ID of an existing cluster that is used for all runs. + When running jobs or tasks on an existing cluster, you may need to manually restart + the cluster if it stops responding. We suggest running jobs and tasks on new clusters for + greater reliability + "for_each_task": + "description": |- + The task executes a nested task for every input provided when the `for_each_task` field is present. + "health": {} + "job_cluster_key": + "description": |- + If job_cluster_key, this task is executed reusing the cluster specified in `job.settings.job_clusters`. + "libraries": + "description": |- + An optional list of libraries to be installed on the cluster. + The default value is an empty list. + "max_retries": + "description": |- + An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with the `FAILED` result_state or `INTERNAL_ERROR` `life_cycle_state`. The value `-1` means to retry indefinitely and the value `0` means to never retry. + "min_retry_interval_millis": + "description": |- + An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried. + "new_cluster": + "description": |- + If new_cluster, a description of a new cluster that is created for each run. + "notebook_task": + "description": |- + The task runs a notebook when the `notebook_task` field is present. + "notification_settings": + "description": |- + Optional notification settings that are used when sending notifications to each of the `email_notifications` and `webhook_notifications` for this task. + "pipeline_task": + "description": |- + The task triggers a pipeline update when the `pipeline_task` field is present. Only pipelines configured to use triggered more are supported. + "python_wheel_task": + "description": |- + The task runs a Python wheel when the `python_wheel_task` field is present. + "retry_on_timeout": + "description": |- + An optional policy to specify whether to retry a job when it times out. The default behavior + is to not retry on timeout. + "run_if": + "description": |- + An optional value specifying the condition determining whether the task is run once its dependencies have been completed. + + * `ALL_SUCCESS`: All dependencies have executed and succeeded + * `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded + * `NONE_FAILED`: None of the dependencies have failed and at least one was executed + * `ALL_DONE`: All dependencies have been completed + * `AT_LEAST_ONE_FAILED`: At least one dependency failed + * `ALL_FAILED`: ALl dependencies have failed + "run_job_task": + "description": |- + The task triggers another job when the `run_job_task` field is present. + "spark_jar_task": + "description": |- + The task runs a JAR when the `spark_jar_task` field is present. 
+ "spark_python_task": + "description": |- + The task runs a Python file when the `spark_python_task` field is present. + "spark_submit_task": + "description": |- + (Legacy) The task runs the spark-submit script when the `spark_submit_task` field is present. This task can run only on new clusters and is not compatible with serverless compute. + + In the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations. + + `master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters. + + By default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage. + + The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths. + "sql_task": + "description": |- + The task runs a SQL query or file, or it refreshes a SQL alert or a legacy SQL dashboard when the `sql_task` field is present. + "task_key": + "description": |- + A unique name for the task. This field is used to refer to this task from other tasks. + This field is required and must be unique within its parent job. + On Update or Reset, this field is used to reference the tasks to be updated or reset. + "timeout_seconds": + "description": |- + An optional timeout applied to each run of this job task. A value of `0` means no timeout. + "webhook_notifications": + "description": |- + A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications. +github.com/databricks/databricks-sdk-go/service/jobs.TaskDependency: + "outcome": + "description": |- + Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. + "task_key": + "description": |- + The name of the task this task depends on. +github.com/databricks/databricks-sdk-go/service/jobs.TaskEmailNotifications: + "no_alert_for_skipped_runs": + "description": |- + If true, do not send email to recipients specified in `on_failure` if the run is skipped. + This field is `deprecated`. Please use the `notification_settings.no_alert_for_skipped_runs` field. + "on_duration_warning_threshold_exceeded": + "description": |- + A list of email addresses to be notified when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is specified in the `health` field for the job, notifications are not sent. + "on_failure": + "description": |- + A list of email addresses to be notified when a run unsuccessfully completes. A run is considered to have completed unsuccessfully if it ends with an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED`, or `TIMED_OUT` result_state. If this is not specified on job creation, reset, or update the list is empty, and notifications are not sent. + "on_start": + "description": |- + A list of email addresses to be notified when a run begins. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent. + "on_streaming_backlog_exceeded": + "description": |- + A list of email addresses to notify when any streaming backlog thresholds are exceeded for any stream. 
+ Streaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`. + Alerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes. + "on_success": + "description": |- + A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent. +github.com/databricks/databricks-sdk-go/service/jobs.TaskNotificationSettings: + "alert_on_last_attempt": + "description": |- + If true, do not send notifications to recipients specified in `on_start` for the retried runs and do not send notifications to recipients specified in `on_failure` until the last retry of the run. + "no_alert_for_canceled_runs": + "description": |- + If true, do not send notifications to recipients specified in `on_failure` if the run is canceled. + "no_alert_for_skipped_runs": + "description": |- + If true, do not send notifications to recipients specified in `on_failure` if the run is skipped. +github.com/databricks/databricks-sdk-go/service/jobs.TriggerSettings: + "file_arrival": + "description": |- + File arrival trigger settings. + "pause_status": + "description": |- + Whether this trigger is paused or not. + "periodic": + "description": |- + Periodic trigger settings. + "table": + "description": |- + Old table trigger settings name. Deprecated in favor of `table_update`. + "table_update": {} +github.com/databricks/databricks-sdk-go/service/jobs.Webhook: + "id": {} +github.com/databricks/databricks-sdk-go/service/jobs.WebhookNotifications: + "on_duration_warning_threshold_exceeded": + "description": |- + An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property. + "on_failure": + "description": |- + An optional list of system notification IDs to call when the run fails. A maximum of 3 destinations can be specified for the `on_failure` property. + "on_start": + "description": |- + An optional list of system notification IDs to call when the run starts. A maximum of 3 destinations can be specified for the `on_start` property. + "on_streaming_backlog_exceeded": + "description": |- + An optional list of system notification IDs to call when any streaming backlog thresholds are exceeded for any stream. + Streaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`. + Alerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes. + A maximum of 3 destinations can be specified for the `on_streaming_backlog_exceeded` property. + "on_success": + "description": |- + An optional list of system notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified for the `on_success` property. +github.com/databricks/databricks-sdk-go/service/ml.ExperimentTag: + "key": + "description": |- + The tag key. 
+ "value": + "description": |- + The tag value. +github.com/databricks/databricks-sdk-go/service/ml.ModelTag: + "key": + "description": |- + The tag key. + "value": + "description": |- + The tag value. +github.com/databricks/databricks-sdk-go/service/ml.ModelVersion: + "creation_timestamp": + "description": |- + Timestamp recorded when this `model_version` was created. + "current_stage": + "description": |- + Current stage for this `model_version`. + "description": + "description": |- + Description of this `model_version`. + "last_updated_timestamp": + "description": |- + Timestamp recorded when metadata for this `model_version` was last updated. + "name": + "description": |- + Unique name of the model + "run_id": + "description": |- + MLflow run ID used when creating `model_version`, if `source` was generated by an + experiment run stored in MLflow tracking server. + "run_link": + "description": |- + Run Link: Direct link to the run that generated this version + "source": + "description": |- + URI indicating the location of the source model artifacts, used when creating `model_version` + "status": + "description": |- + Current status of `model_version` + "status_message": + "description": |- + Details on current `status`, if it is pending or failed. + "tags": + "description": |- + Tags: Additional metadata key-value pairs for this `model_version`. + "user_id": + "description": |- + User that created this `model_version`. + "version": + "description": |- + Model's version number. +github.com/databricks/databricks-sdk-go/service/ml.ModelVersionStatus: + "_": + "description": |- + Current status of `model_version` + "enum": + - |- + PENDING_REGISTRATION + - |- + FAILED_REGISTRATION + - |- + READY +github.com/databricks/databricks-sdk-go/service/ml.ModelVersionTag: + "key": + "description": |- + The tag key. + "value": + "description": |- + The tag value. +github.com/databricks/databricks-sdk-go/service/pipelines.CronTrigger: + "quartz_cron_schedule": {} + "timezone_id": {} +github.com/databricks/databricks-sdk-go/service/pipelines.DeploymentKind: + "_": + "description": | + The deployment method that manages the pipeline: + - BUNDLE: The pipeline is managed by a Databricks Asset Bundle. + "enum": + - |- + BUNDLE +github.com/databricks/databricks-sdk-go/service/pipelines.FileLibrary: + "path": + "description": |- + The absolute path of the file. +github.com/databricks/databricks-sdk-go/service/pipelines.Filters: + "exclude": + "description": |- + Paths to exclude. + "include": + "description": |- + Paths to include. +github.com/databricks/databricks-sdk-go/service/pipelines.IngestionConfig: + "report": + "description": |- + Select a specific source report. + "schema": + "description": |- + Select all tables from a specific source schema. + "table": + "description": |- + Select a specific source table. +github.com/databricks/databricks-sdk-go/service/pipelines.IngestionGatewayPipelineDefinition: + "connection_id": + "description": |- + [Deprecated, use connection_name instead] Immutable. The Unity Catalog connection that this gateway pipeline uses to communicate with the source. + "connection_name": + "description": |- + Immutable. The Unity Catalog connection that this gateway pipeline uses to communicate with the source. + "gateway_storage_catalog": + "description": |- + Required, Immutable. The name of the catalog for the gateway pipeline's storage location. + "gateway_storage_name": + "description": | + Optional. The Unity Catalog-compatible name for the gateway storage location. 
+ This is the destination to use for the data that is extracted by the gateway. + Delta Live Tables system will automatically create the storage location under the catalog and schema. + "gateway_storage_schema": + "description": |- + Required, Immutable. The name of the schema for the gateway pipelines's storage location. +github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefinition: + "connection_name": + "description": |- + Immutable. The Unity Catalog connection that this ingestion pipeline uses to communicate with the source. This is used with connectors for applications like Salesforce, Workday, and so on. + "ingestion_gateway_id": + "description": |- + Immutable. Identifier for the gateway that is used by this ingestion pipeline to communicate with the source database. This is used with connectors to databases like SQL Server. + "objects": + "description": |- + Required. Settings specifying tables to replicate and the destination for the replicated tables. + "table_configuration": + "description": |- + Configuration settings to control the ingestion of tables. These settings are applied to all tables in the pipeline. +github.com/databricks/databricks-sdk-go/service/pipelines.ManualTrigger: {} +github.com/databricks/databricks-sdk-go/service/pipelines.NotebookLibrary: + "path": + "description": |- + The absolute path of the notebook. +github.com/databricks/databricks-sdk-go/service/pipelines.Notifications: + "alerts": + "description": | + A list of alerts that trigger the sending of notifications to the configured + destinations. The supported alerts are: + + * `on-update-success`: A pipeline update completes successfully. + * `on-update-failure`: Each time a pipeline update fails. + * `on-update-fatal-failure`: A pipeline update fails with a non-retryable (fatal) error. + * `on-flow-failure`: A single data flow fails. + "email_recipients": + "description": | + A list of email addresses notified when a configured alert is triggered. +github.com/databricks/databricks-sdk-go/service/pipelines.PipelineCluster: + "apply_policy_default_values": + "description": |- + Note: This field won't be persisted. Only API users will check this field. + "autoscale": + "description": |- + Parameters needed in order to automatically scale clusters up and down based on load. + Note: autoscaling works best with DB runtime versions 3.0 or later. + "aws_attributes": + "description": |- + Attributes related to clusters running on Amazon Web Services. + If not specified at cluster creation, a set of default values will be used. + "azure_attributes": + "description": |- + Attributes related to clusters running on Microsoft Azure. + If not specified at cluster creation, a set of default values will be used. + "cluster_log_conf": + "description": | + The configuration for delivering spark logs to a long-term storage destination. + Only dbfs destinations are supported. Only one destination can be specified + for one cluster. If the conf is given, the logs will be delivered to the destination every + `5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while + the destination of executor logs is `$destination/$clusterId/executor`. + "custom_tags": + "description": |- + Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS + instances and EBS volumes) with these tags in addition to `default_tags`. 
Notes: + + - Currently, Databricks allows at most 45 custom tags + + - Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags + "driver_instance_pool_id": + "description": |- + The optional ID of the instance pool for the driver of the cluster belongs. + The pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not + assigned. + "driver_node_type_id": + "description": |- + The node type of the Spark driver. + Note that this field is optional; if unset, the driver node type will be set as the same value + as `node_type_id` defined above. + "enable_local_disk_encryption": + "description": |- + Whether to enable local disk encryption for the cluster. + "gcp_attributes": + "description": |- + Attributes related to clusters running on Google Cloud Platform. + If not specified at cluster creation, a set of default values will be used. + "init_scripts": + "description": |- + The configuration for storing init scripts. Any number of destinations can be specified. The scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified, init script logs are sent to `//init_scripts`. + "instance_pool_id": + "description": |- + The optional ID of the instance pool to which the cluster belongs. + "label": + "description": |- + A label for the cluster specification, either `default` to configure the default cluster, or `maintenance` to configure the maintenance cluster. This field is optional. The default value is `default`. + "node_type_id": + "description": | + This field encodes, through a single value, the resources available to each of + the Spark nodes in this cluster. For example, the Spark nodes can be provisioned + and optimized for memory or compute intensive workloads. A list of available node + types can be retrieved by using the :method:clusters/listNodeTypes API call. + "num_workers": + "description": |- + Number of worker nodes that this cluster should have. A cluster has one Spark Driver + and `num_workers` Executors for a total of `num_workers` + 1 Spark nodes. + + Note: When reading the properties of a cluster, this field reflects the desired number + of workers rather than the actual current number of workers. For instance, if a cluster + is resized from 5 to 10 workers, this field will immediately be updated to reflect + the target size of 10 workers, whereas the workers listed in `spark_info` will gradually + increase from 5 to 10 as the new nodes are provisioned. + "policy_id": + "description": |- + The ID of the cluster policy used to create the cluster if applicable. + "spark_conf": + "description": | + An object containing a set of optional, user-specified Spark configuration key-value pairs. + See :method:clusters/create for more details. + "spark_env_vars": + "description": |- + An object containing a set of optional, user-specified environment variable key-value pairs. + Please note that key-value pair of the form (X,Y) will be exported as is (i.e., + `export X='Y'`) while launching the driver and workers. + + In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending + them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all + default databricks managed environmental variables are included as well. 
+ + Example Spark environment variables: + `{"SPARK_WORKER_MEMORY": "28000m", "SPARK_LOCAL_DIRS": "/local_disk0"}` or + `{"SPARK_DAEMON_JAVA_OPTS": "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}` + "ssh_public_keys": + "description": |- + SSH public key contents that will be added to each Spark node in this cluster. The + corresponding private keys can be used to login with the user name `ubuntu` on port `2200`. + Up to 10 keys can be specified. +github.com/databricks/databricks-sdk-go/service/pipelines.PipelineClusterAutoscale: + "max_workers": + "description": |- + The maximum number of workers to which the cluster can scale up when overloaded. `max_workers` must be strictly greater than `min_workers`. + "min_workers": + "description": |- + The minimum number of workers the cluster can scale down to when underutilized. + It is also the initial number of workers the cluster will have after creation. + "mode": + "description": | + Databricks Enhanced Autoscaling optimizes cluster utilization by automatically + allocating cluster resources based on workload volume, with minimal impact to + the data processing latency of your pipelines. Enhanced Autoscaling is available + for `updates` clusters only. The legacy autoscaling feature is used for `maintenance` + clusters. +github.com/databricks/databricks-sdk-go/service/pipelines.PipelineClusterAutoscaleMode: + "_": + "description": | + Databricks Enhanced Autoscaling optimizes cluster utilization by automatically + allocating cluster resources based on workload volume, with minimal impact to + the data processing latency of your pipelines. Enhanced Autoscaling is available + for `updates` clusters only. The legacy autoscaling feature is used for `maintenance` + clusters. + "enum": + - |- + ENHANCED + - |- + LEGACY +github.com/databricks/databricks-sdk-go/service/pipelines.PipelineDeployment: + "kind": + "description": |- + The deployment method that manages the pipeline. + "metadata_file_path": + "description": |- + The path to the file containing metadata about the deployment. +github.com/databricks/databricks-sdk-go/service/pipelines.PipelineLibrary: + "file": + "description": | + The path to a file that defines a pipeline and is stored in the Databricks Repos. + "jar": + "description": | + URI of the jar to be installed. Currently only DBFS is supported. + "maven": + "description": | + Specification of a maven library to be installed. + "notebook": + "description": | + The path to a notebook that defines a pipeline and is stored in the Databricks workspace. + "whl": + "description": |- + URI of the whl to be installed. +github.com/databricks/databricks-sdk-go/service/pipelines.PipelineTrigger: + "cron": {} + "manual": {} +github.com/databricks/databricks-sdk-go/service/pipelines.ReportSpec: + "destination_catalog": + "description": |- + Required. Destination catalog to store table. + "destination_schema": + "description": |- + Required. Destination schema to store table. + "destination_table": + "description": |- + Required. Destination table name. The pipeline fails if a table with that name already exists. + "source_url": + "description": |- + Required. Report URL in the source system. + "table_configuration": + "description": |- + Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the IngestionPipelineDefinition object. 
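(Editor's note, not part of the generated file: the `PipelineCluster` and `PipelineClusterAutoscale` fields described above correspond to the pipeline `clusters` block in a bundle's `databricks.yml`. A minimal, hypothetical sketch, assuming the usual `resources.pipelines.<name>` layout used by Databricks Asset Bundles:)

```yaml
# Illustrative only; the pipeline name and values are hypothetical.
resources:
  pipelines:
    my_pipeline:
      name: my_pipeline
      clusters:
        - label: default        # "default" or "maintenance", per the label description above
          autoscale:
            min_workers: 1      # also the initial number of workers after creation
            max_workers: 5      # must be strictly greater than min_workers
            mode: ENHANCED      # ENHANCED or LEGACY, per PipelineClusterAutoscaleMode
```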
+github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindow: + "days_of_week": + "description": |- + Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour). + If not specified all days of the week will be used. + "start_hour": + "description": |- + An integer between 0 and 23 denoting the start hour for the restart window in the 24-hour day. + Continuous pipeline restart is triggered only within a five-hour window starting at this hour. + "time_zone_id": + "description": |- + Time zone id of restart window. See https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html for details. + If not specified, UTC will be used. +github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindowDaysOfWeek: + "_": + "description": |- + Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour). + If not specified all days of the week will be used. + "enum": + - |- + MONDAY + - |- + TUESDAY + - |- + WEDNESDAY + - |- + THURSDAY + - |- + FRIDAY + - |- + SATURDAY + - |- + SUNDAY +github.com/databricks/databricks-sdk-go/service/pipelines.SchemaSpec: + "destination_catalog": + "description": |- + Required. Destination catalog to store tables. + "destination_schema": + "description": |- + Required. Destination schema to store tables in. Tables with the same name as the source tables are created in this destination schema. The pipeline fails If a table with the same name already exists. + "source_catalog": + "description": |- + The source catalog name. Might be optional depending on the type of source. + "source_schema": + "description": |- + Required. Schema name in the source database. + "table_configuration": + "description": |- + Configuration settings to control the ingestion of tables. These settings are applied to all tables in this schema and override the table_configuration defined in the IngestionPipelineDefinition object. +github.com/databricks/databricks-sdk-go/service/pipelines.TableSpec: + "destination_catalog": + "description": |- + Required. Destination catalog to store table. + "destination_schema": + "description": |- + Required. Destination schema to store table. + "destination_table": + "description": |- + Optional. Destination table name. The pipeline fails if a table with that name already exists. If not set, the source table name is used. + "source_catalog": + "description": |- + Source catalog name. Might be optional depending on the type of source. + "source_schema": + "description": |- + Schema name in the source database. Might be optional depending on the type of source. + "source_table": + "description": |- + Required. Table name in the source database. + "table_configuration": + "description": |- + Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the IngestionPipelineDefinition object and the SchemaSpec. +github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfig: + "primary_keys": + "description": |- + The primary key of the table used to apply changes. + "salesforce_include_formula_fields": + "description": |- + If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector + "scd_type": + "description": |- + The SCD type to use to ingest the table. + "sequence_by": + "description": |- + The column names specifying the logical order of events in the source data. 
Delta Live Tables uses this sequencing to handle change events that arrive out of order. +github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfigScdType: + "_": + "description": |- + The SCD type to use to ingest the table. + "enum": + - |- + SCD_TYPE_1 + - |- + SCD_TYPE_2 +github.com/databricks/databricks-sdk-go/service/serving.Ai21LabsConfig: + "ai21labs_api_key": + "description": |- + The Databricks secret key reference for an AI21 Labs API key. If you prefer to paste your API key directly, see `ai21labs_api_key_plaintext`. You must provide an API key using one of the following fields: `ai21labs_api_key` or `ai21labs_api_key_plaintext`. + "ai21labs_api_key_plaintext": + "description": |- + An AI21 Labs API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `ai21labs_api_key`. You must provide an API key using one of the following fields: `ai21labs_api_key` or `ai21labs_api_key_plaintext`. +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayConfig: + "guardrails": + "description": |- + Configuration for AI Guardrails to prevent unwanted data and unsafe data in requests and responses. + "inference_table_config": + "description": |- + Configuration for payload logging using inference tables. Use these tables to monitor and audit data being sent to and received from model APIs and to improve model quality. + "rate_limits": + "description": |- + Configuration for rate limits which can be set to limit endpoint traffic. + "usage_tracking_config": + "description": |- + Configuration to enable usage tracking using system tables. These tables allow you to monitor operational usage on endpoints and their associated costs. +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailParameters: + "invalid_keywords": + "description": |- + List of invalid keywords. AI guardrail uses keyword or string matching to decide if the keyword exists in the request or response content. + "pii": + "description": |- + Configuration for guardrail PII filter. + "safety": + "description": |- + Indicates whether the safety filter is enabled. + "valid_topics": + "description": |- + The list of allowed topics. Given a chat request, this guardrail flags the request if its topic is not in the allowed topics. +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehavior: + "behavior": + "description": |- + Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned. +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehaviorBehavior: + "_": + "description": |- + Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned. + "enum": + - |- + NONE + - |- + BLOCK +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrails: + "input": + "description": |- + Configuration for input guardrail filters. 
+ "output": + "description": |- + Configuration for output guardrail filters. +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayInferenceTableConfig: + "catalog_name": + "description": |- + The name of the catalog in Unity Catalog. Required when enabling inference tables. NOTE: On update, you have to disable inference table first in order to change the catalog name. + "enabled": + "description": |- + Indicates whether the inference table is enabled. + "schema_name": + "description": |- + The name of the schema in Unity Catalog. Required when enabling inference tables. NOTE: On update, you have to disable inference table first in order to change the schema name. + "table_name_prefix": + "description": |- + The prefix of the table in Unity Catalog. NOTE: On update, you have to disable inference table first in order to change the prefix name. +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimit: + "calls": + "description": |- + Used to specify how many calls are allowed for a key within the renewal_period. + "key": + "description": |- + Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified. + "renewal_period": + "description": |- + Renewal period field for a rate limit. Currently, only 'minute' is supported. +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitKey: + "_": + "description": |- + Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified. + "enum": + - |- + user + - |- + endpoint +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitRenewalPeriod: + "_": + "description": |- + Renewal period field for a rate limit. Currently, only 'minute' is supported. + "enum": + - |- + minute +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayUsageTrackingConfig: + "enabled": + "description": |- + Whether to enable usage tracking. +github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfig: + "aws_access_key_id": + "description": |- + The Databricks secret key reference for an AWS access key ID with permissions to interact with Bedrock services. If you prefer to paste your API key directly, see `aws_access_key_id`. You must provide an API key using one of the following fields: `aws_access_key_id` or `aws_access_key_id_plaintext`. + "aws_access_key_id_plaintext": + "description": |- + An AWS access key ID with permissions to interact with Bedrock services provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `aws_access_key_id`. You must provide an API key using one of the following fields: `aws_access_key_id` or `aws_access_key_id_plaintext`. + "aws_region": + "description": |- + The AWS region to use. Bedrock has to be enabled there. + "aws_secret_access_key": + "description": |- + The Databricks secret key reference for an AWS secret access key paired with the access key ID, with permissions to interact with Bedrock services. If you prefer to paste your API key directly, see `aws_secret_access_key_plaintext`. You must provide an API key using one of the following fields: `aws_secret_access_key` or `aws_secret_access_key_plaintext`. + "aws_secret_access_key_plaintext": + "description": |- + An AWS secret access key paired with the access key ID, with permissions to interact with Bedrock services provided as a plaintext string. 
If you prefer to reference your key using Databricks Secrets, see `aws_secret_access_key`. You must provide an API key using one of the following fields: `aws_secret_access_key` or `aws_secret_access_key_plaintext`. + "bedrock_provider": + "description": |- + The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon. +github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedrockProvider: + "_": + "description": |- + The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon. + "enum": + - |- + anthropic + - |- + cohere + - |- + ai21labs + - |- + amazon +github.com/databricks/databricks-sdk-go/service/serving.AnthropicConfig: + "anthropic_api_key": + "description": |- + The Databricks secret key reference for an Anthropic API key. If you prefer to paste your API key directly, see `anthropic_api_key_plaintext`. You must provide an API key using one of the following fields: `anthropic_api_key` or `anthropic_api_key_plaintext`. + "anthropic_api_key_plaintext": + "description": |- + The Anthropic API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `anthropic_api_key`. You must provide an API key using one of the following fields: `anthropic_api_key` or `anthropic_api_key_plaintext`. +github.com/databricks/databricks-sdk-go/service/serving.AutoCaptureConfigInput: + "catalog_name": + "description": |- + The name of the catalog in Unity Catalog. NOTE: On update, you cannot change the catalog name if the inference table is already enabled. + "enabled": + "description": |- + Indicates whether the inference table is enabled. + "schema_name": + "description": |- + The name of the schema in Unity Catalog. NOTE: On update, you cannot change the schema name if the inference table is already enabled. + "table_name_prefix": + "description": |- + The prefix of the table in Unity Catalog. NOTE: On update, you cannot change the prefix name if the inference table is already enabled. +github.com/databricks/databricks-sdk-go/service/serving.CohereConfig: + "cohere_api_base": + "description": "This is an optional field to provide a customized base URL for the Cohere API. \nIf left unspecified, the standard Cohere base URL is used.\n" + "cohere_api_key": + "description": |- + The Databricks secret key reference for a Cohere API key. If you prefer to paste your API key directly, see `cohere_api_key_plaintext`. You must provide an API key using one of the following fields: `cohere_api_key` or `cohere_api_key_plaintext`. + "cohere_api_key_plaintext": + "description": |- + The Cohere API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `cohere_api_key`. You must provide an API key using one of the following fields: `cohere_api_key` or `cohere_api_key_plaintext`. +github.com/databricks/databricks-sdk-go/service/serving.DatabricksModelServingConfig: + "databricks_api_token": + "description": | + The Databricks secret key reference for a Databricks API token that corresponds to a user or service + principal with Can Query access to the model serving endpoint pointed to by this external model. + If you prefer to paste your API key directly, see `databricks_api_token_plaintext`. + You must provide an API key using one of the following fields: `databricks_api_token` or `databricks_api_token_plaintext`. 
+ "databricks_api_token_plaintext": + "description": | + The Databricks API token that corresponds to a user or service + principal with Can Query access to the model serving endpoint pointed to by this external model provided as a plaintext string. + If you prefer to reference your key using Databricks Secrets, see `databricks_api_token`. + You must provide an API key using one of the following fields: `databricks_api_token` or `databricks_api_token_plaintext`. + "databricks_workspace_url": + "description": | + The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model. +github.com/databricks/databricks-sdk-go/service/serving.EndpointCoreConfigInput: + "auto_capture_config": + "description": |- + Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog. + "served_entities": + "description": |- + A list of served entities for the endpoint to serve. A serving endpoint can have up to 15 served entities. + "served_models": + "description": |- + (Deprecated, use served_entities instead) A list of served models for the endpoint to serve. A serving endpoint can have up to 15 served models. + "traffic_config": + "description": |- + The traffic config defining how invocations to the serving endpoint should be routed. +github.com/databricks/databricks-sdk-go/service/serving.EndpointTag: + "key": + "description": |- + Key field for a serving endpoint tag. + "value": + "description": |- + Optional value field for a serving endpoint tag. +github.com/databricks/databricks-sdk-go/service/serving.ExternalModel: + "ai21labs_config": + "description": |- + AI21Labs Config. Only required if the provider is 'ai21labs'. + "amazon_bedrock_config": + "description": |- + Amazon Bedrock Config. Only required if the provider is 'amazon-bedrock'. + "anthropic_config": + "description": |- + Anthropic Config. Only required if the provider is 'anthropic'. + "cohere_config": + "description": |- + Cohere Config. Only required if the provider is 'cohere'. + "databricks_model_serving_config": + "description": |- + Databricks Model Serving Config. Only required if the provider is 'databricks-model-serving'. + "google_cloud_vertex_ai_config": + "description": |- + Google Cloud Vertex AI Config. Only required if the provider is 'google-cloud-vertex-ai'. + "name": + "description": |- + The name of the external model. + "openai_config": + "description": |- + OpenAI Config. Only required if the provider is 'openai'. + "palm_config": + "description": |- + PaLM Config. Only required if the provider is 'palm'. + "provider": + "description": | + The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic', + 'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.", + "task": + "description": |- + The task type of the external model. +github.com/databricks/databricks-sdk-go/service/serving.ExternalModelProvider: + "_": + "description": | + The name of the provider for the external model. 
Currently, the supported providers are 'ai21labs', 'anthropic', + 'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.", + "enum": + - |- + ai21labs + - |- + anthropic + - |- + amazon-bedrock + - |- + cohere + - |- + databricks-model-serving + - |- + google-cloud-vertex-ai + - |- + openai + - |- + palm +github.com/databricks/databricks-sdk-go/service/serving.GoogleCloudVertexAiConfig: + "private_key": + "description": |- + The Databricks secret key reference for a private key for the service account which has access to the Google Cloud Vertex AI Service. See [Best practices for managing service account keys](https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys). If you prefer to paste your API key directly, see `private_key_plaintext`. You must provide an API key using one of the following fields: `private_key` or `private_key_plaintext` + "private_key_plaintext": + "description": |- + The private key for the service account which has access to the Google Cloud Vertex AI Service provided as a plaintext secret. See [Best practices for managing service account keys](https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys). If you prefer to reference your key using Databricks Secrets, see `private_key`. You must provide an API key using one of the following fields: `private_key` or `private_key_plaintext`. + "project_id": + "description": |- + This is the Google Cloud project id that the service account is associated with. + "region": + "description": |- + This is the region for the Google Cloud Vertex AI Service. See [supported regions](https://cloud.google.com/vertex-ai/docs/general/locations) for more details. Some models are only available in specific regions. +github.com/databricks/databricks-sdk-go/service/serving.OpenAiConfig: + "microsoft_entra_client_id": + "description": | + This field is only required for Azure AD OpenAI and is the Microsoft Entra Client ID. + "microsoft_entra_client_secret": + "description": | + The Databricks secret key reference for a client secret used for Microsoft Entra ID authentication. + If you prefer to paste your client secret directly, see `microsoft_entra_client_secret_plaintext`. + You must provide an API key using one of the following fields: `microsoft_entra_client_secret` or `microsoft_entra_client_secret_plaintext`. + "microsoft_entra_client_secret_plaintext": + "description": | + The client secret used for Microsoft Entra ID authentication provided as a plaintext string. + If you prefer to reference your key using Databricks Secrets, see `microsoft_entra_client_secret`. + You must provide an API key using one of the following fields: `microsoft_entra_client_secret` or `microsoft_entra_client_secret_plaintext`. + "microsoft_entra_tenant_id": + "description": | + This field is only required for Azure AD OpenAI and is the Microsoft Entra Tenant ID. + "openai_api_base": + "description": | + This is a field to provide a customized base URl for the OpenAI API. + For Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service + provided by Azure. + For other OpenAI API types, this field is optional, and if left unspecified, the standard OpenAI base URL is used. + "openai_api_key": + "description": |- + The Databricks secret key reference for an OpenAI API key using the OpenAI or Azure service. If you prefer to paste your API key directly, see `openai_api_key_plaintext`. 
You must provide an API key using one of the following fields: `openai_api_key` or `openai_api_key_plaintext`. + "openai_api_key_plaintext": + "description": |- + The OpenAI API key using the OpenAI or Azure service provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `openai_api_key`. You must provide an API key using one of the following fields: `openai_api_key` or `openai_api_key_plaintext`. + "openai_api_type": + "description": | + This is an optional field to specify the type of OpenAI API to use. + For Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security + access validation protocol. For access token validation, use azure. For authentication using Azure Active + Directory (Azure AD) use, azuread. + "openai_api_version": + "description": | + This is an optional field to specify the OpenAI API version. + For Azure OpenAI, this field is required, and is the version of the Azure OpenAI service to + utilize, specified by a date. + "openai_deployment_name": + "description": | + This field is only required for Azure OpenAI and is the name of the deployment resource for the + Azure OpenAI service. + "openai_organization": + "description": | + This is an optional field to specify the organization in OpenAI or Azure OpenAI. +github.com/databricks/databricks-sdk-go/service/serving.PaLmConfig: + "palm_api_key": + "description": |- + The Databricks secret key reference for a PaLM API key. If you prefer to paste your API key directly, see `palm_api_key_plaintext`. You must provide an API key using one of the following fields: `palm_api_key` or `palm_api_key_plaintext`. + "palm_api_key_plaintext": + "description": |- + The PaLM API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `palm_api_key`. You must provide an API key using one of the following fields: `palm_api_key` or `palm_api_key_plaintext`. +github.com/databricks/databricks-sdk-go/service/serving.RateLimit: + "calls": + "description": |- + Used to specify how many calls are allowed for a key within the renewal_period. + "key": + "description": |- + Key field for a serving endpoint rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified. + "renewal_period": + "description": |- + Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported. +github.com/databricks/databricks-sdk-go/service/serving.RateLimitKey: + "_": + "description": |- + Key field for a serving endpoint rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified. + "enum": + - |- + user + - |- + endpoint +github.com/databricks/databricks-sdk-go/service/serving.RateLimitRenewalPeriod: + "_": + "description": |- + Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported. + "enum": + - |- + minute +github.com/databricks/databricks-sdk-go/service/serving.Route: + "served_model_name": + "description": |- + The name of the served model this route configures traffic for. + "traffic_percentage": + "description": |- + The percentage of endpoint traffic to send to this route. It must be an integer between 0 and 100 inclusive. +github.com/databricks/databricks-sdk-go/service/serving.ServedEntityInput: + "entity_name": + "description": | + The name of the entity to be served. 
The entity may be a model in the Databricks Model Registry, a model in the Unity Catalog (UC), + or a function of type FEATURE_SPEC in the UC. If it is a UC object, the full name of the object should be given in the form of + __catalog_name__.__schema_name__.__model_name__. + "entity_version": + "description": |- + The version of the model in Databricks Model Registry to be served or empty if the entity is a FEATURE_SPEC. + "environment_vars": + "description": "An object containing a set of optional, user-specified environment variable key-value pairs used for serving this entity.\nNote: this is an experimental feature and subject to change. \nExample entity environment variables that refer to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`" + "external_model": + "description": | + The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled) + can be specified with the latter set being used for custom model serving for a Databricks registered model. For an existing endpoint with external_model, + it cannot be updated to an endpoint without external_model. If the endpoint is created without external_model, users cannot update it to add external_model later. + The task type of all external models within an endpoint must be the same. + "instance_profile_arn": + "description": |- + ARN of the instance profile that the served entity uses to access AWS resources. + "max_provisioned_throughput": + "description": |- + The maximum tokens per second that the endpoint can scale up to. + "min_provisioned_throughput": + "description": |- + The minimum tokens per second that the endpoint can scale down to. + "name": + "description": | + The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores. + If not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other + entities, it defaults to -. + "scale_to_zero_enabled": + "description": |- + Whether the compute resources for the served entity should scale down to zero. + "workload_size": + "description": | + The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. + A single unit of provisioned concurrency can process one request at a time. + Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). + If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0. + "workload_type": + "description": | + The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is + "CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. + See the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types). 
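(Editor's note, not part of the generated file: the `ServedEntityInput` fields above are what a bundle's model serving endpoint configuration ultimately populates. A minimal, hypothetical sketch, assuming the `model_serving_endpoints` resource key and a Unity Catalog model name:)

```yaml
# Illustrative only; the endpoint and model names are hypothetical.
resources:
  model_serving_endpoints:
    my_endpoint:
      name: my-endpoint
      config:
        served_entities:
          - entity_name: main.default.my_model   # UC object: __catalog_name__.__schema_name__.__model_name__
            entity_version: "1"
            workload_size: Small                 # Small, Medium, or Large
            scale_to_zero_enabled: true
```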
+github.com/databricks/databricks-sdk-go/service/serving.ServedModelInput: + "environment_vars": + "description": "An object containing a set of optional, user-specified environment variable key-value pairs used for serving this model.\nNote: this is an experimental feature and subject to change. \nExample model environment variables that refer to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`" + "instance_profile_arn": + "description": |- + ARN of the instance profile that the served model will use to access AWS resources. + "max_provisioned_throughput": + "description": |- + The maximum tokens per second that the endpoint can scale up to. + "min_provisioned_throughput": + "description": |- + The minimum tokens per second that the endpoint can scale down to. + "model_name": + "description": | + The name of the model in Databricks Model Registry to be served or if the model resides in Unity Catalog, the full name of model, + in the form of __catalog_name__.__schema_name__.__model_name__. + "model_version": + "description": |- + The version of the model in Databricks Model Registry or Unity Catalog to be served. + "name": + "description": | + The name of a served model. It must be unique across an endpoint. If not specified, this field will default to -. + A served model name can consist of alphanumeric characters, dashes, and underscores. + "scale_to_zero_enabled": + "description": |- + Whether the compute resources for the served model should scale down to zero. + "workload_size": + "description": | + The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between. + A single unit of provisioned concurrency can process one request at a time. + Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). + If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0. + "workload_type": + "description": | + The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is + "CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. + See the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types). +github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadSize: + "_": + "description": | + The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between. + A single unit of provisioned concurrency can process one request at a time. + Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). + If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0. + "enum": + - |- + Small + - |- + Medium + - |- + Large +github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadType: + "_": + "description": | + The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is + "CPU". 
For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. + See the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types). + "enum": + - |- + CPU + - |- + GPU_SMALL + - |- + GPU_MEDIUM + - |- + GPU_LARGE + - |- + MULTIGPU_MEDIUM +github.com/databricks/databricks-sdk-go/service/serving.TrafficConfig: + "routes": + "description": |- + The list of routes that define traffic to each served entity. diff --git a/bundle/internal/schema/annotations_openapi_overrides.yml b/bundle/internal/schema/annotations_openapi_overrides.yml new file mode 100644 index 000000000..ef602d6ef --- /dev/null +++ b/bundle/internal/schema/annotations_openapi_overrides.yml @@ -0,0 +1,161 @@ +github.com/databricks/cli/bundle/config/resources.Cluster: + "data_security_mode": + "description": |- + PLACEHOLDER + "docker_image": + "description": |- + PLACEHOLDER + "kind": + "description": |- + PLACEHOLDER + "permissions": + "description": |- + PLACEHOLDER + "runtime_engine": + "description": |- + PLACEHOLDER + "workload_type": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.Dashboard: + "embed_credentials": + "description": |- + PLACEHOLDER + "file_path": + "description": |- + PLACEHOLDER + "permissions": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.Job: + "health": + "description": |- + PLACEHOLDER + "permissions": + "description": |- + PLACEHOLDER + "run_as": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.MlflowExperiment: + "permissions": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.MlflowModel: + "permissions": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.ModelServingEndpoint: + "permissions": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.Pipeline: + "permissions": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.QualityMonitor: + "table_name": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.RegisteredModel: + "grants": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.Schema: + "grants": + "description": |- + PLACEHOLDER + "properties": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.Volume: + "grants": + "description": |- + PLACEHOLDER + "volume_type": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/compute.AwsAttributes: + "availability": + "description": |- + PLACEHOLDER + "ebs_volume_type": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/compute.AzureAttributes: + "availability": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec: + "data_security_mode": + "description": |- + PLACEHOLDER + "docker_image": + "description": |- + PLACEHOLDER + "kind": + "description": |- + PLACEHOLDER + "runtime_engine": + "description": |- + PLACEHOLDER + "workload_type": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/compute.DockerImage: + "basic_auth": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/compute.GcpAttributes: + "availability": + "description": |- + PLACEHOLDER 
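(Editor's note: the PLACEHOLDER entries in this overrides file are meant to be replaced by hand. Per the workflow spelled out in `main_test.go` later in this diff, `make schema` inserts the placeholders and a follow-up edit supplies the real text. A hypothetical filled-in entry, with an invented description, might look like:)

```yaml
# Illustrative only; the description text below is invented, not taken from this PR.
github.com/databricks/cli/bundle/config/resources.Cluster:
  "docker_image":
    "description": |-
      Custom Docker image to use for the cluster nodes.
```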
+github.com/databricks/databricks-sdk-go/service/jobs.GitSource: + "git_snapshot": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/jobs.JobEnvironment: + "spec": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/jobs.JobsHealthRule: + "metric": + "description": |- + PLACEHOLDER + "op": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/jobs.JobsHealthRules: + "rules": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask: + "python_named_params": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/jobs.Task: + "health": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/jobs.TriggerSettings: + "table_update": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/jobs.Webhook: + "id": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/pipelines.CronTrigger: + "quartz_cron_schedule": + "description": |- + PLACEHOLDER + "timezone_id": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/pipelines.PipelineTrigger: + "cron": + "description": |- + PLACEHOLDER + "manual": + "description": |- + PLACEHOLDER diff --git a/bundle/internal/schema/annotations_test.go b/bundle/internal/schema/annotations_test.go new file mode 100644 index 000000000..d7e2fea7c --- /dev/null +++ b/bundle/internal/schema/annotations_test.go @@ -0,0 +1,44 @@ +package main + +import ( + "testing" +) + +func TestConvertLinksToAbsoluteUrl(t *testing.T) { + tests := []struct { + input string + expected string + }{ + { + input: "", + expected: "", + }, + { + input: "Some text (not a link)", + expected: "Some text (not a link)", + }, + { + input: "This is a link to [_](#section)", + expected: "This is a link to [section](https://docs.databricks.com/dev-tools/bundles/reference.html#section)", + }, + { + input: "This is a link to [_](/dev-tools/bundles/resources.html#dashboard)", + expected: "This is a link to [dashboard](https://docs.databricks.com/dev-tools/bundles/resources.html#dashboard)", + }, + { + input: "This is a link to [_](/dev-tools/bundles/resources.html)", + expected: "This is a link to [link](https://docs.databricks.com/dev-tools/bundles/resources.html)", + }, + { + input: "This is a link to [external](https://external.com)", + expected: "This is a link to [external](https://external.com)", + }, + } + + for _, test := range tests { + result := convertLinksToAbsoluteUrl(test.input) + if result != test.expected { + t.Errorf("For input '%s', expected '%s', but got '%s'", test.input, test.expected, result) + } + } +} diff --git a/bundle/internal/schema/main.go b/bundle/internal/schema/main.go index 881ce3496..77927a966 100644 --- a/bundle/internal/schema/main.go +++ b/bundle/internal/schema/main.go @@ -5,6 +5,7 @@ import ( "fmt" "log" "os" + "path/filepath" "reflect" "github.com/databricks/cli/bundle/config" @@ -43,18 +44,20 @@ func addInterpolationPatterns(typ reflect.Type, s jsonschema.Schema) jsonschema. case jsonschema.ArrayType, jsonschema.ObjectType: // arrays and objects can have complex variable values specified. 
 return jsonschema.Schema{
- AnyOf: []jsonschema.Schema{
+ // OneOf is used because we don't expect more than 1 match and schema-based auto-complete works better with OneOf
+ OneOf: []jsonschema.Schema{
 s,
 {
 Type: jsonschema.StringType,
 Pattern: interpolationPattern("var"),
- }},
+ },
+ },
 }
 case jsonschema.IntegerType, jsonschema.NumberType, jsonschema.BooleanType:
 // primitives can have variable values, or references like ${bundle.xyz}
 // or ${workspace.xyz}
 return jsonschema.Schema{
- AnyOf: []jsonschema.Schema{
+ OneOf: []jsonschema.Schema{
 s,
 {Type: jsonschema.StringType, Pattern: interpolationPattern("resources")},
 {Type: jsonschema.StringType, Pattern: interpolationPattern("bundle")},
@@ -112,44 +115,67 @@ func makeVolumeTypeOptional(typ reflect.Type, s jsonschema.Schema) jsonschema.Sc
 }

 func main() {
- if len(os.Args) != 2 {
- fmt.Println("Usage: go run main.go <output-file>")
+ if len(os.Args) != 3 {
+ fmt.Println("Usage: go run main.go <work-dir> <output-file>")
 os.Exit(1)
 }
+ // Directory with annotation files
+ workdir := os.Args[1]
 // Output file, where the generated JSON schema will be written to.
- outputFile := os.Args[1]
+ outputFile := os.Args[2]
+
+ generateSchema(workdir, outputFile)
+}
+
+func generateSchema(workdir, outputFile string) {
+ annotationsPath := filepath.Join(workdir, "annotations.yml")
+ annotationsOpenApiPath := filepath.Join(workdir, "annotations_openapi.yml")
+ annotationsOpenApiOverridesPath := filepath.Join(workdir, "annotations_openapi_overrides.yml")
 // Input file, the databricks openapi spec.
 inputFile := os.Getenv("DATABRICKS_OPENAPI_SPEC")
- if inputFile == "" {
- log.Fatal("DATABRICKS_OPENAPI_SPEC environment variable not set")
+ if inputFile != "" {
+ p, err := newParser(inputFile)
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Printf("Writing OpenAPI annotations to %s\n", annotationsOpenApiPath)
+ err = p.extractAnnotations(reflect.TypeOf(config.Root{}), annotationsOpenApiPath, annotationsOpenApiOverridesPath)
+ if err != nil {
+ log.Fatal(err)
+ }
 }
- p, err := newParser(inputFile)
+ a, err := newAnnotationHandler([]string{annotationsOpenApiPath, annotationsOpenApiOverridesPath, annotationsPath})
 if err != nil {
 log.Fatal(err)
 }
 // Generate the JSON schema from the bundle Go struct.
 s, err := jsonschema.FromType(reflect.TypeOf(config.Root{}), []func(reflect.Type, jsonschema.Schema) jsonschema.Schema{
- p.addDescriptions,
- p.addEnums,
 removeJobsFields,
 makeVolumeTypeOptional,
+ a.addAnnotations,
 addInterpolationPatterns,
 })
 if err != nil {
 log.Fatal(err)
 }
+ // Overwrite the input annotation file, adding missing annotations
+ err = a.syncWithMissingAnnotations(annotationsPath)
+ if err != nil {
+ log.Fatal(err)
+ }
+
 b, err := json.MarshalIndent(s, "", " ")
 if err != nil {
 log.Fatal(err)
 }
 // Write the schema descriptions to the output file.
- err = os.WriteFile(outputFile, b, 0644)
+ err = os.WriteFile(outputFile, b, 0o644)
 if err != nil {
 log.Fatal(err)
 }
diff --git a/bundle/internal/schema/main_test.go b/bundle/internal/schema/main_test.go
new file mode 100644
index 000000000..06e89c856
--- /dev/null
+++ b/bundle/internal/schema/main_test.go
@@ -0,0 +1,125 @@
+package main
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "path"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/databricks/cli/bundle/config"
+ "github.com/databricks/cli/libs/dyn"
+ "github.com/databricks/cli/libs/dyn/merge"
+ "github.com/databricks/cli/libs/dyn/yamlloader"
+ "github.com/databricks/cli/libs/jsonschema"
+ "github.com/stretchr/testify/assert"
+ "gopkg.in/yaml.v3"
+)
+
+func copyFile(src, dst string) error {
+ in, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer in.Close()
+
+ out, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer out.Close()
+
+ _, err = io.Copy(out, in)
+ if err != nil {
+ return err
+ }
+
+ return out.Close()
+}
+
+// Checks whether descriptions are added for new config fields in the annotations.yml file.
+// If this test fails, either manually add descriptions to `annotations.yml` or do the following:
+// 1. for fields described outside of the CLI package, fetch the latest schema from the OpenAPI spec and point the DATABRICKS_OPENAPI_SPEC env variable at it
+// 2. run `make schema` from the repository root to add placeholder descriptions
+// 3. replace all "PLACEHOLDER" values with the actual descriptions if possible
+// 4. run `make schema` again to regenerate the schema with the actual descriptions
+func TestRequiredAnnotationsForNewFields(t *testing.T) {
+ workdir := t.TempDir()
+ annotationsPath := path.Join(workdir, "annotations.yml")
+ annotationsOpenApiPath := path.Join(workdir, "annotations_openapi.yml")
+ annotationsOpenApiOverridesPath := path.Join(workdir, "annotations_openapi_overrides.yml")
+
+ // Copy existing annotation files from the same folder as this test
+ err := copyFile("annotations.yml", annotationsPath)
+ assert.NoError(t, err)
+ err = copyFile("annotations_openapi.yml", annotationsOpenApiPath)
+ assert.NoError(t, err)
+ err = copyFile("annotations_openapi_overrides.yml", annotationsOpenApiOverridesPath)
+ assert.NoError(t, err)
+
+ generateSchema(workdir, path.Join(t.TempDir(), "schema.json"))
+
+ originalFile, err := os.ReadFile("annotations.yml")
+ assert.NoError(t, err)
+ currentFile, err := os.ReadFile(annotationsPath)
+ assert.NoError(t, err)
+ original, err := yamlloader.LoadYAML("", bytes.NewBuffer(originalFile))
+ assert.NoError(t, err)
+ current, err := yamlloader.LoadYAML("", bytes.NewBuffer(currentFile))
+ assert.NoError(t, err)
+
+ // Collect added paths.
+ var updatedFieldPaths []string + _, err = merge.Override(original, current, merge.OverrideVisitor{ + VisitInsert: func(basePath dyn.Path, right dyn.Value) (dyn.Value, error) { + updatedFieldPaths = append(updatedFieldPaths, basePath.String()) + return right, nil + }, + }) + assert.NoError(t, err) + assert.Empty(t, updatedFieldPaths, "Missing JSON-schema descriptions for new config fields in bundle/internal/schema/annotations.yml:\n%s", strings.Join(updatedFieldPaths, "\n")) +} + +// Checks whether types in annotation files are still present in Config type +func TestNoDetachedAnnotations(t *testing.T) { + files := []string{ + "annotations.yml", + "annotations_openapi.yml", + "annotations_openapi_overrides.yml", + } + + types := map[string]bool{} + for _, file := range files { + annotations, err := getAnnotations(file) + assert.NoError(t, err) + for k := range annotations { + types[k] = false + } + } + + _, err := jsonschema.FromType(reflect.TypeOf(config.Root{}), []func(reflect.Type, jsonschema.Schema) jsonschema.Schema{ + func(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema { + delete(types, getPath(typ)) + return s + }, + }) + assert.NoError(t, err) + + for typ := range types { + t.Errorf("Type `%s` in annotations file is not found in `root.Config` type", typ) + } + assert.Empty(t, types, "Detached annotations found, regenerate schema and check for package path changes") +} + +func getAnnotations(path string) (annotationFile, error) { + b, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + var data annotationFile + err = yaml.Unmarshal(b, &data) + return data, err +} diff --git a/bundle/internal/schema/parser.go b/bundle/internal/schema/parser.go index ef3d6e719..919908429 100644 --- a/bundle/internal/schema/parser.go +++ b/bundle/internal/schema/parser.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/databricks/cli/libs/jsonschema" + "gopkg.in/yaml.v3" ) type Components struct { @@ -23,6 +24,8 @@ type openapiParser struct { ref map[string]jsonschema.Schema } +const RootTypeKey = "_" + func newParser(path string) (*openapiParser, error) { b, err := os.ReadFile(path) if err != nil { @@ -51,7 +54,7 @@ func (p *openapiParser) findRef(typ reflect.Type) (jsonschema.Schema, bool) { // Check for embedded Databricks Go SDK types. if typ.Kind() == reflect.Struct { - for i := 0; i < typ.NumField(); i++ { + for i := range typ.NumField() { if !typ.Field(i).Anonymous { continue } @@ -78,7 +81,11 @@ func (p *openapiParser) findRef(typ reflect.Type) (jsonschema.Schema, bool) { // Skip if the type is not in the openapi spec. _, ok := p.ref[k] if !ok { - continue + k = mapIncorrectTypNames(k) + _, ok = p.ref[k] + if !ok { + continue + } } // Return the first Go SDK type found in the openapi spec. @@ -88,36 +95,122 @@ func (p *openapiParser) findRef(typ reflect.Type) (jsonschema.Schema, bool) { return jsonschema.Schema{}, false } +// Fix inconsistent type names between the Go SDK and the OpenAPI spec. +// E.g. "serving.PaLmConfig" in the Go SDK is "serving.PaLMConfig" in the OpenAPI spec. +func mapIncorrectTypNames(ref string) string { + switch ref { + case "serving.PaLmConfig": + return "serving.PaLMConfig" + case "serving.OpenAiConfig": + return "serving.OpenAIConfig" + case "serving.GoogleCloudVertexAiConfig": + return "serving.GoogleCloudVertexAIConfig" + case "serving.Ai21LabsConfig": + return "serving.AI21LabsConfig" + default: + return ref + } +} + // Use the OpenAPI spec to load descriptions for the given type. 
-func (p *openapiParser) addDescriptions(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema { - ref, ok := p.findRef(typ) - if !ok { - return s +func (p *openapiParser) extractAnnotations(typ reflect.Type, outputPath, overridesPath string) error { + annotations := annotationFile{} + overrides := annotationFile{} + + b, err := os.ReadFile(overridesPath) + if err != nil { + return err + } + err = yaml.Unmarshal(b, &overrides) + if err != nil { + return err + } + if overrides == nil { + overrides = annotationFile{} } - s.Description = ref.Description - for k, v := range s.Properties { - if refProp, ok := ref.Properties[k]; ok { - v.Description = refProp.Description - } + _, err = jsonschema.FromType(typ, []func(reflect.Type, jsonschema.Schema) jsonschema.Schema{ + func(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema { + ref, ok := p.findRef(typ) + if !ok { + return s + } + + basePath := getPath(typ) + pkg := map[string]annotation{} + annotations[basePath] = pkg + + if ref.Description != "" || ref.Enum != nil { + pkg[RootTypeKey] = annotation{Description: ref.Description, Enum: ref.Enum} + } + + for k := range s.Properties { + if refProp, ok := ref.Properties[k]; ok { + pkg[k] = annotation{Description: refProp.Description, Enum: refProp.Enum} + if refProp.Description == "" { + addEmptyOverride(k, basePath, overrides) + } + } else { + addEmptyOverride(k, basePath, overrides) + } + } + return s + }, + }) + if err != nil { + return err } - return s + err = saveYamlWithStyle(overridesPath, overrides) + if err != nil { + return err + } + err = saveYamlWithStyle(outputPath, annotations) + if err != nil { + return err + } + err = prependCommentToFile(outputPath, "# This file is auto-generated. DO NOT EDIT.\n") + if err != nil { + return err + } + return nil } -// Use the OpenAPI spec add enum values for the given type. -func (p *openapiParser) addEnums(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema { - ref, ok := p.findRef(typ) - if !ok { - return s +func prependCommentToFile(outputPath, comment string) error { + b, err := os.ReadFile(outputPath) + if err != nil { + return err } - - s.Enum = append(s.Enum, ref.Enum...) - for k, v := range s.Properties { - if refProp, ok := ref.Properties[k]; ok { - v.Enum = append(v.Enum, refProp.Enum...) 
- } + f, err := os.OpenFile(outputPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644) + if err != nil { + return err } + defer f.Close() - return s + _, err = f.WriteString(comment) + if err != nil { + return err + } + _, err = f.Write(b) + return err +} + +func addEmptyOverride(key, pkg string, overridesFile annotationFile) { + if overridesFile[pkg] == nil { + overridesFile[pkg] = map[string]annotation{} + } + + overrides := overridesFile[pkg] + if overrides[key].Description == "" { + overrides[key] = annotation{Description: Placeholder} + } + + a, ok := overrides[key] + if !ok { + a = annotation{} + } + if a.Description == "" { + a.Description = Placeholder + } + overrides[key] = a } diff --git a/bundle/internal/schema/testdata/pass/target_variable.yml b/bundle/internal/schema/testdata/pass/target_variable.yml new file mode 100644 index 000000000..34af94658 --- /dev/null +++ b/bundle/internal/schema/testdata/pass/target_variable.yml @@ -0,0 +1,5 @@ +targets: + production: + variables: + myvar: + default: true diff --git a/bundle/internal/tf/codegen/generator/walker.go b/bundle/internal/tf/codegen/generator/walker.go index 2ed044c3d..0e9d73c4e 100644 --- a/bundle/internal/tf/codegen/generator/walker.go +++ b/bundle/internal/tf/codegen/generator/walker.go @@ -2,9 +2,8 @@ package generator import ( "fmt" - "strings" - "slices" + "strings" tfjson "github.com/hashicorp/terraform-json" "github.com/iancoleman/strcase" @@ -70,6 +69,25 @@ func nestedBlockKeys(block *tfjson.SchemaBlock) []string { return keys } +func nestedField(name []string, k string, isRef bool) field { + // Collect field properties. + fieldName := strcase.ToCamel(k) + fieldTypePrefix := "" + if isRef { + fieldTypePrefix = "*" + } else { + fieldTypePrefix = "[]" + } + fieldType := fmt.Sprintf("%s%s", fieldTypePrefix, strings.Join(append(name, strcase.ToCamel(k)), "")) + fieldTag := fmt.Sprintf("%s,omitempty", k) + + return field{ + Name: fieldName, + Type: fieldType, + Tag: fieldTag, + } +} + func (w *walker) walk(block *tfjson.SchemaBlock, name []string) error { // Produce nested types before this block itself. // This ensures types are defined before they are referenced. @@ -91,10 +109,24 @@ func (w *walker) walk(block *tfjson.SchemaBlock, name []string) error { v := block.Attributes[k] // Assert the attribute type is always set. - if v.AttributeType == cty.NilType { + if v.AttributeType == cty.NilType && v.AttributeNestedType == nil { return fmt.Errorf("unexpected nil type for attribute %s", k) } + // If there is a nested type, walk it and continue to next attribute. + if v.AttributeNestedType != nil { + nestedBlock := &tfjson.SchemaBlock{ + Attributes: v.AttributeNestedType.Attributes, + } + err := w.walk(nestedBlock, append(name, strcase.ToCamel(k))) + if err != nil { + return err + } + // Append to list of fields for type. + typ.Fields = append(typ.Fields, nestedField(name, k, v.AttributeNestedType.NestingMode == tfjson.SchemaNestingModeSingle)) + continue + } + // Collect field properties. fieldName := strcase.ToCamel(k) fieldType := processAttributeType(v.AttributeType) @@ -117,24 +149,8 @@ func (w *walker) walk(block *tfjson.SchemaBlock, name []string) error { // Declare nested blocks. for _, k := range nestedBlockKeys(block) { v := block.NestedBlocks[k] - - // Collect field properties. 
- fieldName := strcase.ToCamel(k) - fieldTypePrefix := "" - if v.MaxItems == 1 { - fieldTypePrefix = "*" - } else { - fieldTypePrefix = "[]" - } - fieldType := fmt.Sprintf("%s%s", fieldTypePrefix, strings.Join(append(name, strcase.ToCamel(k)), "")) - fieldTag := fmt.Sprintf("%s,omitempty", k) - // Append to list of fields for type. - typ.Fields = append(typ.Fields, field{ - Name: fieldName, - Type: fieldType, - Tag: fieldTag, - }) + typ.Fields = append(typ.Fields, nestedField(name, k, v.MaxItems == 1)) } // Append type to list of structs. diff --git a/bundle/internal/tf/codegen/go.mod b/bundle/internal/tf/codegen/go.mod index 67ac4bbc7..e9fc83615 100644 --- a/bundle/internal/tf/codegen/go.mod +++ b/bundle/internal/tf/codegen/go.mod @@ -1,24 +1,27 @@ module github.com/databricks/cli/bundle/internal/tf/codegen -go 1.21 +go 1.23 + +toolchain go1.23.4 require ( - github.com/hashicorp/go-version v1.6.0 - github.com/hashicorp/hc-install v0.6.3 - github.com/hashicorp/terraform-exec v0.20.0 - github.com/hashicorp/terraform-json v0.21.0 + github.com/hashicorp/go-version v1.7.0 + github.com/hashicorp/hc-install v0.9.0 + github.com/hashicorp/terraform-exec v0.21.0 + github.com/hashicorp/terraform-json v0.23.0 github.com/iancoleman/strcase v0.3.0 - github.com/zclconf/go-cty v1.14.2 - golang.org/x/exp v0.0.0-20240213143201-ec583247a57a + github.com/zclconf/go-cty v1.15.1 + golang.org/x/exp v0.0.0-20241204233417-43b7b7cde48d ) require ( - github.com/ProtonMail/go-crypto v1.1.0-alpha.0 // indirect + github.com/ProtonMail/go-crypto v1.1.3 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect - github.com/cloudflare/circl v1.3.7 // indirect + github.com/cloudflare/circl v1.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - golang.org/x/crypto v0.19.0 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/text v0.14.0 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect + golang.org/x/crypto v0.31.0 // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect ) diff --git a/bundle/internal/tf/codegen/go.sum b/bundle/internal/tf/codegen/go.sum index 7a4023ba5..1ce56777f 100644 --- a/bundle/internal/tf/codegen/go.sum +++ b/bundle/internal/tf/codegen/go.sum @@ -2,67 +2,79 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/ProtonMail/go-crypto v1.1.0-alpha.0 h1:nHGfwXmFvJrSR9xu8qL7BkO4DqTHXE9N5vPhgY2I+j0= -github.com/ProtonMail/go-crypto v1.1.0-alpha.0/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/ProtonMail/go-crypto v1.1.3 h1:nRBOetoydLeUb4nHajyO2bKqMLfWQ/ZPwkXqXxPxCFk= +github.com/ProtonMail/go-crypto v1.1.3/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= -github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= -github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= +github.com/cloudflare/circl v1.5.0 h1:hxIWksrX6XN5a1L2TI/h53AGPhNHoUBo+TD1ms9+pys= 
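The walker.go change above consolidates field construction for nested blocks and the newly supported nested attribute types into a single nestedField helper. A minimal standalone sketch of the naming it produces, where the local field struct is a stand-in for the generator's and the inputs are illustrative:

package main

import (
	"fmt"
	"strings"

	"github.com/iancoleman/strcase"
)

// field is a local stand-in for the generator's field struct.
type field struct {
	Name, Type, Tag string
}

// nestedField mirrors the helper added in walker.go: isRef (MaxItems == 1 or
// NestingModeSingle) yields a pointer type, anything else yields a slice.
func nestedField(name []string, k string, isRef bool) field {
	prefix := "[]"
	if isRef {
		prefix = "*"
	}
	return field{
		Name: strcase.ToCamel(k),
		Type: prefix + strings.Join(append(name, strcase.ToCamel(k)), ""),
		Tag:  fmt.Sprintf("%s,omitempty", k),
	}
}

func main() {
	// A single nested object becomes a pointer field, e.g. *ResourceJobTaskNewCluster.
	fmt.Printf("%+v\n", nestedField([]string{"ResourceJob", "Task"}, "new_cluster", true))
	// A repeatable nested block becomes a slice field, e.g. []ResourceAppResources.
	fmt.Printf("%+v\n", nestedField([]string{"ResourceApp"}, "resources", false))
}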
+github.com/cloudflare/circl v1.5.0/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= -github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= -github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= +github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= +github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.6.3 h1:yE/r1yJvWbtrJ0STwScgEnCanb0U9v7zp0Gbkmcoxqs= -github.com/hashicorp/hc-install v0.6.3/go.mod h1:KamGdbodYzlufbWh4r9NRo8y6GLHWZP2GBtdnms1Ln0= -github.com/hashicorp/terraform-exec v0.20.0 h1:DIZnPsqzPGuUnq6cH8jWcPunBfY+C+M8JyYF3vpnuEo= -github.com/hashicorp/terraform-exec v0.20.0/go.mod h1:ckKGkJWbsNqFKV1itgMnE0hY9IYf1HoiekpuN0eWoDw= -github.com/hashicorp/terraform-json v0.21.0 h1:9NQxbLNqPbEMze+S6+YluEdXgJmhQykRyRNd+zTI05U= -github.com/hashicorp/terraform-json v0.21.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hc-install v0.9.0 h1:2dIk8LcvANwtv3QZLckxcjyF5w8KVtiMxu6G6eLhghE= 
+github.com/hashicorp/hc-install v0.9.0/go.mod h1:+6vOP+mf3tuGgMApVYtmsnDoKWMDcFXeTxCACYZ8SFg= +github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ= +github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg= +github.com/hashicorp/terraform-json v0.23.0 h1:sniCkExU4iKtTADReHzACkk8fnpQXrdD2xoR+lppBkI= +github.com/hashicorp/terraform-json v0.23.0/go.mod h1:MHdXbBAbSg0GvzuWazEGKAn/cyNfIB7mN6y7KJN6y2c= github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= -github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= -github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= +github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= -github.com/zclconf/go-cty v1.14.2 h1:kTG7lqmBou0Zkx35r6HJHUQTvaRPr5bIAf3AoHS0izI= -github.com/zclconf/go-cty v1.14.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= -golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.14.0 
h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +github.com/zclconf/go-cty v1.15.1 h1:RgQYm4j2EvoBRXOPxhUvxPzRrGDo1eCOhHXuGfrj5S0= +github.com/zclconf/go-cty v1.15.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/exp v0.0.0-20241204233417-43b7b7cde48d h1:0olWaB5pg3+oychR51GUVCEsGkeCU/2JxjBgIo4f3M0= +golang.org/x/exp v0.0.0-20241204233417-43b7b7cde48d/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index a778e0232..27c4b16cd 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.59.0" +const ProviderVersion = "1.62.0" diff --git a/bundle/internal/tf/codegen/templates/root.go.tmpl b/bundle/internal/tf/codegen/templates/root.go.tmpl index e03e978f0..b5c53c161 100644 --- a/bundle/internal/tf/codegen/templates/root.go.tmpl +++ b/bundle/internal/tf/codegen/templates/root.go.tmpl @@ -25,9 +25,9 @@ const ProviderVersion = "{{ .ProviderVersion }}" func NewRoot() *Root { return &Root{ - Terraform: map[string]interface{}{ - "required_providers": map[string]interface{}{ - "databricks": map[string]interface{}{ + Terraform: map[string]any{ + "required_providers": map[string]any{ + "databricks": map[string]any{ "source": ProviderSource, "version": ProviderVersion, }, diff --git a/bundle/internal/tf/schema/data_source_app.go b/bundle/internal/tf/schema/data_source_app.go new file mode 100644 index 000000000..9b4ef077e --- /dev/null +++ b/bundle/internal/tf/schema/data_source_app.go @@ -0,0 +1,107 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type DataSourceAppAppActiveDeploymentDeploymentArtifacts struct { + SourceCodePath string `json:"source_code_path,omitempty"` +} + +type DataSourceAppAppActiveDeploymentStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppAppActiveDeployment struct { + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DeploymentArtifacts *DataSourceAppAppActiveDeploymentDeploymentArtifacts `json:"deployment_artifacts,omitempty"` + DeploymentId string `json:"deployment_id,omitempty"` + Mode string `json:"mode,omitempty"` + SourceCodePath string `json:"source_code_path,omitempty"` + Status *DataSourceAppAppActiveDeploymentStatus `json:"status,omitempty"` + UpdateTime string `json:"update_time,omitempty"` +} + +type DataSourceAppAppAppStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppAppComputeStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppAppPendingDeploymentDeploymentArtifacts struct { + SourceCodePath string `json:"source_code_path,omitempty"` +} + +type DataSourceAppAppPendingDeploymentStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppAppPendingDeployment struct { + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DeploymentArtifacts *DataSourceAppAppPendingDeploymentDeploymentArtifacts `json:"deployment_artifacts,omitempty"` + DeploymentId string `json:"deployment_id,omitempty"` + Mode string `json:"mode,omitempty"` + SourceCodePath string `json:"source_code_path,omitempty"` + Status *DataSourceAppAppPendingDeploymentStatus `json:"status,omitempty"` + UpdateTime string `json:"update_time,omitempty"` +} + +type DataSourceAppAppResourcesJob struct { + Id string `json:"id"` + Permission string `json:"permission"` +} + +type DataSourceAppAppResourcesSecret struct { + Key string `json:"key"` + Permission string `json:"permission"` + Scope string `json:"scope"` +} + +type DataSourceAppAppResourcesServingEndpoint struct { + Name string `json:"name"` + Permission string `json:"permission"` +} + +type DataSourceAppAppResourcesSqlWarehouse struct { + Id string `json:"id"` + Permission string `json:"permission"` +} + +type DataSourceAppAppResources struct { + Description string `json:"description,omitempty"` + Job *DataSourceAppAppResourcesJob `json:"job,omitempty"` + Name string `json:"name"` + Secret *DataSourceAppAppResourcesSecret `json:"secret,omitempty"` + ServingEndpoint *DataSourceAppAppResourcesServingEndpoint `json:"serving_endpoint,omitempty"` + SqlWarehouse *DataSourceAppAppResourcesSqlWarehouse `json:"sql_warehouse,omitempty"` +} + +type DataSourceAppApp struct { + ActiveDeployment *DataSourceAppAppActiveDeployment `json:"active_deployment,omitempty"` + AppStatus *DataSourceAppAppAppStatus `json:"app_status,omitempty"` + ComputeStatus *DataSourceAppAppComputeStatus `json:"compute_status,omitempty"` + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DefaultSourceCodePath string `json:"default_source_code_path,omitempty"` + Description string `json:"description,omitempty"` + Name string `json:"name"` + PendingDeployment *DataSourceAppAppPendingDeployment `json:"pending_deployment,omitempty"` + Resources []DataSourceAppAppResources `json:"resources,omitempty"` 
+ ServicePrincipalClientId string `json:"service_principal_client_id,omitempty"` + ServicePrincipalId int `json:"service_principal_id,omitempty"` + ServicePrincipalName string `json:"service_principal_name,omitempty"` + UpdateTime string `json:"update_time,omitempty"` + Updater string `json:"updater,omitempty"` + Url string `json:"url,omitempty"` +} + +type DataSourceApp struct { + App *DataSourceAppApp `json:"app,omitempty"` + Name string `json:"name"` +} diff --git a/bundle/internal/tf/schema/data_source_apps.go b/bundle/internal/tf/schema/data_source_apps.go new file mode 100644 index 000000000..dd381eabf --- /dev/null +++ b/bundle/internal/tf/schema/data_source_apps.go @@ -0,0 +1,106 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type DataSourceAppsAppActiveDeploymentDeploymentArtifacts struct { + SourceCodePath string `json:"source_code_path,omitempty"` +} + +type DataSourceAppsAppActiveDeploymentStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppsAppActiveDeployment struct { + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DeploymentArtifacts *DataSourceAppsAppActiveDeploymentDeploymentArtifacts `json:"deployment_artifacts,omitempty"` + DeploymentId string `json:"deployment_id,omitempty"` + Mode string `json:"mode,omitempty"` + SourceCodePath string `json:"source_code_path,omitempty"` + Status *DataSourceAppsAppActiveDeploymentStatus `json:"status,omitempty"` + UpdateTime string `json:"update_time,omitempty"` +} + +type DataSourceAppsAppAppStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppsAppComputeStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppsAppPendingDeploymentDeploymentArtifacts struct { + SourceCodePath string `json:"source_code_path,omitempty"` +} + +type DataSourceAppsAppPendingDeploymentStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppsAppPendingDeployment struct { + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DeploymentArtifacts *DataSourceAppsAppPendingDeploymentDeploymentArtifacts `json:"deployment_artifacts,omitempty"` + DeploymentId string `json:"deployment_id,omitempty"` + Mode string `json:"mode,omitempty"` + SourceCodePath string `json:"source_code_path,omitempty"` + Status *DataSourceAppsAppPendingDeploymentStatus `json:"status,omitempty"` + UpdateTime string `json:"update_time,omitempty"` +} + +type DataSourceAppsAppResourcesJob struct { + Id string `json:"id"` + Permission string `json:"permission"` +} + +type DataSourceAppsAppResourcesSecret struct { + Key string `json:"key"` + Permission string `json:"permission"` + Scope string `json:"scope"` +} + +type DataSourceAppsAppResourcesServingEndpoint struct { + Name string `json:"name"` + Permission string `json:"permission"` +} + +type DataSourceAppsAppResourcesSqlWarehouse struct { + Id string `json:"id"` + Permission string `json:"permission"` +} + +type DataSourceAppsAppResources struct { + Description string `json:"description,omitempty"` + Job *DataSourceAppsAppResourcesJob `json:"job,omitempty"` + Name string `json:"name"` + Secret *DataSourceAppsAppResourcesSecret `json:"secret,omitempty"` + ServingEndpoint *DataSourceAppsAppResourcesServingEndpoint 
`json:"serving_endpoint,omitempty"` + SqlWarehouse *DataSourceAppsAppResourcesSqlWarehouse `json:"sql_warehouse,omitempty"` +} + +type DataSourceAppsApp struct { + ActiveDeployment *DataSourceAppsAppActiveDeployment `json:"active_deployment,omitempty"` + AppStatus *DataSourceAppsAppAppStatus `json:"app_status,omitempty"` + ComputeStatus *DataSourceAppsAppComputeStatus `json:"compute_status,omitempty"` + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DefaultSourceCodePath string `json:"default_source_code_path,omitempty"` + Description string `json:"description,omitempty"` + Name string `json:"name"` + PendingDeployment *DataSourceAppsAppPendingDeployment `json:"pending_deployment,omitempty"` + Resources []DataSourceAppsAppResources `json:"resources,omitempty"` + ServicePrincipalClientId string `json:"service_principal_client_id,omitempty"` + ServicePrincipalId int `json:"service_principal_id,omitempty"` + ServicePrincipalName string `json:"service_principal_name,omitempty"` + UpdateTime string `json:"update_time,omitempty"` + Updater string `json:"updater,omitempty"` + Url string `json:"url,omitempty"` +} + +type DataSourceApps struct { + App []DataSourceAppsApp `json:"app,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_cluster.go b/bundle/internal/tf/schema/data_source_cluster.go index 94d67bbfa..38cb534f2 100644 --- a/bundle/internal/tf/schema/data_source_cluster.go +++ b/bundle/internal/tf/schema/data_source_cluster.go @@ -317,6 +317,8 @@ type DataSourceClusterClusterInfoSpec struct { EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` IdempotencyToken string `json:"idempotency_token,omitempty"` InstancePoolId string `json:"instance_pool_id,omitempty"` + IsSingleNode bool `json:"is_single_node,omitempty"` + Kind string `json:"kind,omitempty"` NodeTypeId string `json:"node_type_id,omitempty"` NumWorkers int `json:"num_workers,omitempty"` PolicyId string `json:"policy_id,omitempty"` @@ -326,6 +328,7 @@ type DataSourceClusterClusterInfoSpec struct { SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` SparkVersion string `json:"spark_version"` SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` Autoscale *DataSourceClusterClusterInfoSpecAutoscale `json:"autoscale,omitempty"` AwsAttributes *DataSourceClusterClusterInfoSpecAwsAttributes `json:"aws_attributes,omitempty"` AzureAttributes *DataSourceClusterClusterInfoSpecAzureAttributes `json:"azure_attributes,omitempty"` @@ -369,7 +372,9 @@ type DataSourceClusterClusterInfo struct { EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` InstancePoolId string `json:"instance_pool_id,omitempty"` + IsSingleNode bool `json:"is_single_node,omitempty"` JdbcPort int `json:"jdbc_port,omitempty"` + Kind string `json:"kind,omitempty"` LastRestartedTime int `json:"last_restarted_time,omitempty"` LastStateLossTime int `json:"last_state_loss_time,omitempty"` NodeTypeId string `json:"node_type_id,omitempty"` @@ -386,6 +391,7 @@ type DataSourceClusterClusterInfo struct { State string `json:"state,omitempty"` StateMessage string `json:"state_message,omitempty"` TerminatedTime int `json:"terminated_time,omitempty"` + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` Autoscale *DataSourceClusterClusterInfoAutoscale `json:"autoscale,omitempty"` AwsAttributes *DataSourceClusterClusterInfoAwsAttributes 
`json:"aws_attributes,omitempty"` AzureAttributes *DataSourceClusterClusterInfoAzureAttributes `json:"azure_attributes,omitempty"` diff --git a/bundle/internal/tf/schema/data_source_functions.go b/bundle/internal/tf/schema/data_source_functions.go index 6085d7522..416db8fc8 100644 --- a/bundle/internal/tf/schema/data_source_functions.go +++ b/bundle/internal/tf/schema/data_source_functions.go @@ -69,6 +69,7 @@ type DataSourceFunctionsFunctions struct { FullDataType string `json:"full_data_type,omitempty"` FullName string `json:"full_name,omitempty"` FunctionId string `json:"function_id,omitempty"` + InputParams []DataSourceFunctionsFunctionsInputParams `json:"input_params,omitempty"` IsDeterministic bool `json:"is_deterministic,omitempty"` IsNullCall bool `json:"is_null_call,omitempty"` MetastoreId string `json:"metastore_id,omitempty"` @@ -76,8 +77,10 @@ type DataSourceFunctionsFunctions struct { Owner string `json:"owner,omitempty"` ParameterStyle string `json:"parameter_style,omitempty"` Properties string `json:"properties,omitempty"` + ReturnParams []DataSourceFunctionsFunctionsReturnParams `json:"return_params,omitempty"` RoutineBody string `json:"routine_body,omitempty"` RoutineDefinition string `json:"routine_definition,omitempty"` + RoutineDependencies []DataSourceFunctionsFunctionsRoutineDependencies `json:"routine_dependencies,omitempty"` SchemaName string `json:"schema_name,omitempty"` SecurityType string `json:"security_type,omitempty"` SpecificName string `json:"specific_name,omitempty"` @@ -85,14 +88,11 @@ type DataSourceFunctionsFunctions struct { SqlPath string `json:"sql_path,omitempty"` UpdatedAt int `json:"updated_at,omitempty"` UpdatedBy string `json:"updated_by,omitempty"` - InputParams []DataSourceFunctionsFunctionsInputParams `json:"input_params,omitempty"` - ReturnParams []DataSourceFunctionsFunctionsReturnParams `json:"return_params,omitempty"` - RoutineDependencies []DataSourceFunctionsFunctionsRoutineDependencies `json:"routine_dependencies,omitempty"` } type DataSourceFunctions struct { CatalogName string `json:"catalog_name"` + Functions []DataSourceFunctionsFunctions `json:"functions,omitempty"` IncludeBrowse bool `json:"include_browse,omitempty"` SchemaName string `json:"schema_name"` - Functions []DataSourceFunctionsFunctions `json:"functions,omitempty"` } diff --git a/bundle/internal/tf/schema/data_source_jobs.go b/bundle/internal/tf/schema/data_source_jobs.go index 98533c0c8..643f7a9f9 100644 --- a/bundle/internal/tf/schema/data_source_jobs.go +++ b/bundle/internal/tf/schema/data_source_jobs.go @@ -3,6 +3,7 @@ package schema type DataSourceJobs struct { - Id string `json:"id,omitempty"` - Ids map[string]string `json:"ids,omitempty"` + Id string `json:"id,omitempty"` + Ids map[string]string `json:"ids,omitempty"` + JobNameContains string `json:"job_name_contains,omitempty"` } diff --git a/bundle/internal/tf/schema/data_source_notification_destinations.go b/bundle/internal/tf/schema/data_source_notification_destinations.go index c95ad6db9..8447b497b 100644 --- a/bundle/internal/tf/schema/data_source_notification_destinations.go +++ b/bundle/internal/tf/schema/data_source_notification_destinations.go @@ -10,6 +10,6 @@ type DataSourceNotificationDestinationsNotificationDestinations struct { type DataSourceNotificationDestinations struct { DisplayNameContains string `json:"display_name_contains,omitempty"` - Type string `json:"type,omitempty"` NotificationDestinations []DataSourceNotificationDestinationsNotificationDestinations 
`json:"notification_destinations,omitempty"` + Type string `json:"type,omitempty"` } diff --git a/bundle/internal/tf/schema/data_source_registered_model.go b/bundle/internal/tf/schema/data_source_registered_model.go index e19e0849a..41d69ff8f 100644 --- a/bundle/internal/tf/schema/data_source_registered_model.go +++ b/bundle/internal/tf/schema/data_source_registered_model.go @@ -8,6 +8,7 @@ type DataSourceRegisteredModelModelInfoAliases struct { } type DataSourceRegisteredModelModelInfo struct { + Aliases []DataSourceRegisteredModelModelInfoAliases `json:"aliases,omitempty"` BrowseOnly bool `json:"browse_only,omitempty"` CatalogName string `json:"catalog_name,omitempty"` Comment string `json:"comment,omitempty"` @@ -21,7 +22,6 @@ type DataSourceRegisteredModelModelInfo struct { StorageLocation string `json:"storage_location,omitempty"` UpdatedAt int `json:"updated_at,omitempty"` UpdatedBy string `json:"updated_by,omitempty"` - Aliases []DataSourceRegisteredModelModelInfoAliases `json:"aliases,omitempty"` } type DataSourceRegisteredModel struct { diff --git a/bundle/internal/tf/schema/data_source_registered_model_versions.go b/bundle/internal/tf/schema/data_source_registered_model_versions.go index f70e58f85..1a670dfbc 100644 --- a/bundle/internal/tf/schema/data_source_registered_model_versions.go +++ b/bundle/internal/tf/schema/data_source_registered_model_versions.go @@ -25,6 +25,7 @@ type DataSourceRegisteredModelVersionsModelVersionsModelVersionDependencies stru } type DataSourceRegisteredModelVersionsModelVersions struct { + Aliases []DataSourceRegisteredModelVersionsModelVersionsAliases `json:"aliases,omitempty"` BrowseOnly bool `json:"browse_only,omitempty"` CatalogName string `json:"catalog_name,omitempty"` Comment string `json:"comment,omitempty"` @@ -33,6 +34,7 @@ type DataSourceRegisteredModelVersionsModelVersions struct { Id string `json:"id,omitempty"` MetastoreId string `json:"metastore_id,omitempty"` ModelName string `json:"model_name,omitempty"` + ModelVersionDependencies []DataSourceRegisteredModelVersionsModelVersionsModelVersionDependencies `json:"model_version_dependencies,omitempty"` RunId string `json:"run_id,omitempty"` RunWorkspaceId int `json:"run_workspace_id,omitempty"` SchemaName string `json:"schema_name,omitempty"` @@ -42,8 +44,6 @@ type DataSourceRegisteredModelVersionsModelVersions struct { UpdatedAt int `json:"updated_at,omitempty"` UpdatedBy string `json:"updated_by,omitempty"` Version int `json:"version,omitempty"` - Aliases []DataSourceRegisteredModelVersionsModelVersionsAliases `json:"aliases,omitempty"` - ModelVersionDependencies []DataSourceRegisteredModelVersionsModelVersionsModelVersionDependencies `json:"model_version_dependencies,omitempty"` } type DataSourceRegisteredModelVersions struct { diff --git a/bundle/internal/tf/schema/data_source_serving_endpoints.go b/bundle/internal/tf/schema/data_source_serving_endpoints.go index 028121b5a..bdfd778e0 100644 --- a/bundle/internal/tf/schema/data_source_serving_endpoints.go +++ b/bundle/internal/tf/schema/data_source_serving_endpoints.go @@ -8,9 +8,9 @@ type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsInputPii struct { type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsInput struct { InvalidKeywords []string `json:"invalid_keywords,omitempty"` + Pii []DataSourceServingEndpointsEndpointsAiGatewayGuardrailsInputPii `json:"pii,omitempty"` Safety bool `json:"safety,omitempty"` ValidTopics []string `json:"valid_topics,omitempty"` - Pii 
[]DataSourceServingEndpointsEndpointsAiGatewayGuardrailsInputPii `json:"pii,omitempty"` } type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsOutputPii struct { @@ -19,9 +19,9 @@ type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsOutputPii struct { type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsOutput struct { InvalidKeywords []string `json:"invalid_keywords,omitempty"` + Pii []DataSourceServingEndpointsEndpointsAiGatewayGuardrailsOutputPii `json:"pii,omitempty"` Safety bool `json:"safety,omitempty"` ValidTopics []string `json:"valid_topics,omitempty"` - Pii []DataSourceServingEndpointsEndpointsAiGatewayGuardrailsOutputPii `json:"pii,omitempty"` } type DataSourceServingEndpointsEndpointsAiGatewayGuardrails struct { @@ -111,17 +111,17 @@ type DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelPalmCon } type DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModel struct { - Name string `json:"name"` - Provider string `json:"provider"` - Task string `json:"task"` Ai21LabsConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelAi21LabsConfig `json:"ai21labs_config,omitempty"` AmazonBedrockConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelAmazonBedrockConfig `json:"amazon_bedrock_config,omitempty"` AnthropicConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelAnthropicConfig `json:"anthropic_config,omitempty"` CohereConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelCohereConfig `json:"cohere_config,omitempty"` DatabricksModelServingConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelDatabricksModelServingConfig `json:"databricks_model_serving_config,omitempty"` GoogleCloudVertexAiConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelGoogleCloudVertexAiConfig `json:"google_cloud_vertex_ai_config,omitempty"` + Name string `json:"name"` OpenaiConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelOpenaiConfig `json:"openai_config,omitempty"` PalmConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelPalmConfig `json:"palm_config,omitempty"` + Provider string `json:"provider"` + Task string `json:"task"` } type DataSourceServingEndpointsEndpointsConfigServedEntitiesFoundationModel struct { @@ -134,9 +134,9 @@ type DataSourceServingEndpointsEndpointsConfigServedEntitiesFoundationModel stru type DataSourceServingEndpointsEndpointsConfigServedEntities struct { EntityName string `json:"entity_name,omitempty"` EntityVersion string `json:"entity_version,omitempty"` - Name string `json:"name,omitempty"` ExternalModel []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModel `json:"external_model,omitempty"` FoundationModel []DataSourceServingEndpointsEndpointsConfigServedEntitiesFoundationModel `json:"foundation_model,omitempty"` + Name string `json:"name,omitempty"` } type DataSourceServingEndpointsEndpointsConfigServedModels struct { @@ -161,16 +161,16 @@ type DataSourceServingEndpointsEndpointsTags struct { } type DataSourceServingEndpointsEndpoints struct { + AiGateway []DataSourceServingEndpointsEndpointsAiGateway `json:"ai_gateway,omitempty"` + Config []DataSourceServingEndpointsEndpointsConfig `json:"config,omitempty"` CreationTimestamp int `json:"creation_timestamp,omitempty"` Creator string `json:"creator,omitempty"` Id string `json:"id,omitempty"` LastUpdatedTimestamp int `json:"last_updated_timestamp,omitempty"` Name string 
`json:"name,omitempty"` - Task string `json:"task,omitempty"` - AiGateway []DataSourceServingEndpointsEndpointsAiGateway `json:"ai_gateway,omitempty"` - Config []DataSourceServingEndpointsEndpointsConfig `json:"config,omitempty"` State []DataSourceServingEndpointsEndpointsState `json:"state,omitempty"` Tags []DataSourceServingEndpointsEndpointsTags `json:"tags,omitempty"` + Task string `json:"task,omitempty"` } type DataSourceServingEndpoints struct { diff --git a/bundle/internal/tf/schema/data_sources.go b/bundle/internal/tf/schema/data_sources.go index 3a59bf8c3..1880db25a 100644 --- a/bundle/internal/tf/schema/data_sources.go +++ b/bundle/internal/tf/schema/data_sources.go @@ -3,6 +3,8 @@ package schema type DataSources struct { + App map[string]any `json:"databricks_app,omitempty"` + Apps map[string]any `json:"databricks_apps,omitempty"` AwsAssumeRolePolicy map[string]any `json:"databricks_aws_assume_role_policy,omitempty"` AwsBucketPolicy map[string]any `json:"databricks_aws_bucket_policy,omitempty"` AwsCrossaccountPolicy map[string]any `json:"databricks_aws_crossaccount_policy,omitempty"` @@ -66,6 +68,8 @@ type DataSources struct { func NewDataSources() *DataSources { return &DataSources{ + App: make(map[string]any), + Apps: make(map[string]any), AwsAssumeRolePolicy: make(map[string]any), AwsBucketPolicy: make(map[string]any), AwsCrossaccountPolicy: make(map[string]any), diff --git a/bundle/internal/tf/schema/resource_app.go b/bundle/internal/tf/schema/resource_app.go new file mode 100644 index 000000000..14c93b793 --- /dev/null +++ b/bundle/internal/tf/schema/resource_app.go @@ -0,0 +1,102 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceAppActiveDeploymentDeploymentArtifacts struct { + SourceCodePath string `json:"source_code_path,omitempty"` +} + +type ResourceAppActiveDeploymentStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type ResourceAppActiveDeployment struct { + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DeploymentArtifacts *ResourceAppActiveDeploymentDeploymentArtifacts `json:"deployment_artifacts,omitempty"` + DeploymentId string `json:"deployment_id,omitempty"` + Mode string `json:"mode,omitempty"` + SourceCodePath string `json:"source_code_path,omitempty"` + Status *ResourceAppActiveDeploymentStatus `json:"status,omitempty"` + UpdateTime string `json:"update_time,omitempty"` +} + +type ResourceAppAppStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type ResourceAppComputeStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type ResourceAppPendingDeploymentDeploymentArtifacts struct { + SourceCodePath string `json:"source_code_path,omitempty"` +} + +type ResourceAppPendingDeploymentStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type ResourceAppPendingDeployment struct { + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DeploymentArtifacts *ResourceAppPendingDeploymentDeploymentArtifacts `json:"deployment_artifacts,omitempty"` + DeploymentId string `json:"deployment_id,omitempty"` + Mode string `json:"mode,omitempty"` + SourceCodePath string `json:"source_code_path,omitempty"` + Status *ResourceAppPendingDeploymentStatus `json:"status,omitempty"` + UpdateTime string `json:"update_time,omitempty"` 
+} + +type ResourceAppResourcesJob struct { + Id string `json:"id"` + Permission string `json:"permission"` +} + +type ResourceAppResourcesSecret struct { + Key string `json:"key"` + Permission string `json:"permission"` + Scope string `json:"scope"` +} + +type ResourceAppResourcesServingEndpoint struct { + Name string `json:"name"` + Permission string `json:"permission"` +} + +type ResourceAppResourcesSqlWarehouse struct { + Id string `json:"id"` + Permission string `json:"permission"` +} + +type ResourceAppResources struct { + Description string `json:"description,omitempty"` + Job *ResourceAppResourcesJob `json:"job,omitempty"` + Name string `json:"name"` + Secret *ResourceAppResourcesSecret `json:"secret,omitempty"` + ServingEndpoint *ResourceAppResourcesServingEndpoint `json:"serving_endpoint,omitempty"` + SqlWarehouse *ResourceAppResourcesSqlWarehouse `json:"sql_warehouse,omitempty"` +} + +type ResourceApp struct { + ActiveDeployment *ResourceAppActiveDeployment `json:"active_deployment,omitempty"` + AppStatus *ResourceAppAppStatus `json:"app_status,omitempty"` + ComputeStatus *ResourceAppComputeStatus `json:"compute_status,omitempty"` + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DefaultSourceCodePath string `json:"default_source_code_path,omitempty"` + Description string `json:"description,omitempty"` + Name string `json:"name"` + PendingDeployment *ResourceAppPendingDeployment `json:"pending_deployment,omitempty"` + Resources []ResourceAppResources `json:"resources,omitempty"` + ServicePrincipalClientId string `json:"service_principal_client_id,omitempty"` + ServicePrincipalId int `json:"service_principal_id,omitempty"` + ServicePrincipalName string `json:"service_principal_name,omitempty"` + UpdateTime string `json:"update_time,omitempty"` + Updater string `json:"updater,omitempty"` + Url string `json:"url,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_azure_adls_gen2_mount.go b/bundle/internal/tf/schema/resource_azure_adls_gen2_mount.go index d0f96d54e..6e2ea08e8 100644 --- a/bundle/internal/tf/schema/resource_azure_adls_gen2_mount.go +++ b/bundle/internal/tf/schema/resource_azure_adls_gen2_mount.go @@ -9,6 +9,7 @@ type ResourceAzureAdlsGen2Mount struct { ClusterId string `json:"cluster_id,omitempty"` ContainerName string `json:"container_name"` Directory string `json:"directory,omitempty"` + Environment string `json:"environment,omitempty"` Id string `json:"id,omitempty"` InitializeFileSystem bool `json:"initialize_file_system"` MountName string `json:"mount_name"` diff --git a/bundle/internal/tf/schema/resource_cluster.go b/bundle/internal/tf/schema/resource_cluster.go index 4ae063c89..50395add9 100644 --- a/bundle/internal/tf/schema/resource_cluster.go +++ b/bundle/internal/tf/schema/resource_cluster.go @@ -176,6 +176,8 @@ type ResourceCluster struct { IdempotencyToken string `json:"idempotency_token,omitempty"` InstancePoolId string `json:"instance_pool_id,omitempty"` IsPinned bool `json:"is_pinned,omitempty"` + IsSingleNode bool `json:"is_single_node,omitempty"` + Kind string `json:"kind,omitempty"` NoWait bool `json:"no_wait,omitempty"` NodeTypeId string `json:"node_type_id,omitempty"` NumWorkers int `json:"num_workers,omitempty"` @@ -188,6 +190,7 @@ type ResourceCluster struct { SshPublicKeys []string `json:"ssh_public_keys,omitempty"` State string `json:"state,omitempty"` Url string `json:"url,omitempty"` + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` Autoscale *ResourceClusterAutoscale 
`json:"autoscale,omitempty"` AwsAttributes *ResourceClusterAwsAttributes `json:"aws_attributes,omitempty"` AzureAttributes *ResourceClusterAzureAttributes `json:"azure_attributes,omitempty"` diff --git a/bundle/internal/tf/schema/resource_credential.go b/bundle/internal/tf/schema/resource_credential.go new file mode 100644 index 000000000..9d47219ea --- /dev/null +++ b/bundle/internal/tf/schema/resource_credential.go @@ -0,0 +1,52 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceCredentialAwsIamRole struct { + ExternalId string `json:"external_id,omitempty"` + RoleArn string `json:"role_arn,omitempty"` + UnityCatalogIamArn string `json:"unity_catalog_iam_arn,omitempty"` +} + +type ResourceCredentialAzureManagedIdentity struct { + AccessConnectorId string `json:"access_connector_id"` + CredentialId string `json:"credential_id,omitempty"` + ManagedIdentityId string `json:"managed_identity_id,omitempty"` +} + +type ResourceCredentialAzureServicePrincipal struct { + ApplicationId string `json:"application_id"` + ClientSecret string `json:"client_secret"` + DirectoryId string `json:"directory_id"` +} + +type ResourceCredentialDatabricksGcpServiceAccount struct { + CredentialId string `json:"credential_id,omitempty"` + Email string `json:"email,omitempty"` + PrivateKeyId string `json:"private_key_id,omitempty"` +} + +type ResourceCredential struct { + Comment string `json:"comment,omitempty"` + CreatedAt int `json:"created_at,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + CredentialId string `json:"credential_id,omitempty"` + ForceDestroy bool `json:"force_destroy,omitempty"` + ForceUpdate bool `json:"force_update,omitempty"` + FullName string `json:"full_name,omitempty"` + Id string `json:"id,omitempty"` + IsolationMode string `json:"isolation_mode,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name"` + Owner string `json:"owner,omitempty"` + Purpose string `json:"purpose"` + ReadOnly bool `json:"read_only,omitempty"` + SkipValidation bool `json:"skip_validation,omitempty"` + UpdatedAt int `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` + UsedForManagedStorage bool `json:"used_for_managed_storage,omitempty"` + AwsIamRole *ResourceCredentialAwsIamRole `json:"aws_iam_role,omitempty"` + AzureManagedIdentity *ResourceCredentialAzureManagedIdentity `json:"azure_managed_identity,omitempty"` + AzureServicePrincipal *ResourceCredentialAzureServicePrincipal `json:"azure_service_principal,omitempty"` + DatabricksGcpServiceAccount *ResourceCredentialDatabricksGcpServiceAccount `json:"databricks_gcp_service_account,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_grant.go b/bundle/internal/tf/schema/resource_grant.go index d8569f304..6ed97791c 100644 --- a/bundle/internal/tf/schema/resource_grant.go +++ b/bundle/internal/tf/schema/resource_grant.go @@ -4,6 +4,7 @@ package schema type ResourceGrant struct { Catalog string `json:"catalog,omitempty"` + Credential string `json:"credential,omitempty"` ExternalLocation string `json:"external_location,omitempty"` ForeignConnection string `json:"foreign_connection,omitempty"` Function string `json:"function,omitempty"` diff --git a/bundle/internal/tf/schema/resource_grants.go b/bundle/internal/tf/schema/resource_grants.go index dd00152fb..474a9950f 100644 --- a/bundle/internal/tf/schema/resource_grants.go +++ b/bundle/internal/tf/schema/resource_grants.go @@ -9,6 +9,7 @@ type ResourceGrantsGrant struct { type 
ResourceGrants struct { Catalog string `json:"catalog,omitempty"` + Credential string `json:"credential,omitempty"` ExternalLocation string `json:"external_location,omitempty"` ForeignConnection string `json:"foreign_connection,omitempty"` Function string `json:"function,omitempty"` diff --git a/bundle/internal/tf/schema/resource_job.go b/bundle/internal/tf/schema/resource_job.go index c89eafab9..63c8aeb7b 100644 --- a/bundle/internal/tf/schema/resource_job.go +++ b/bundle/internal/tf/schema/resource_job.go @@ -240,6 +240,8 @@ type ResourceJobJobClusterNewCluster struct { EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` IdempotencyToken string `json:"idempotency_token,omitempty"` InstancePoolId string `json:"instance_pool_id,omitempty"` + IsSingleNode bool `json:"is_single_node,omitempty"` + Kind string `json:"kind,omitempty"` NodeTypeId string `json:"node_type_id,omitempty"` NumWorkers int `json:"num_workers,omitempty"` PolicyId string `json:"policy_id,omitempty"` @@ -249,6 +251,7 @@ type ResourceJobJobClusterNewCluster struct { SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` SparkVersion string `json:"spark_version"` SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` Autoscale *ResourceJobJobClusterNewClusterAutoscale `json:"autoscale,omitempty"` AwsAttributes *ResourceJobJobClusterNewClusterAwsAttributes `json:"aws_attributes,omitempty"` AzureAttributes *ResourceJobJobClusterNewClusterAzureAttributes `json:"azure_attributes,omitempty"` @@ -462,6 +465,8 @@ type ResourceJobNewCluster struct { EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` IdempotencyToken string `json:"idempotency_token,omitempty"` InstancePoolId string `json:"instance_pool_id,omitempty"` + IsSingleNode bool `json:"is_single_node,omitempty"` + Kind string `json:"kind,omitempty"` NodeTypeId string `json:"node_type_id,omitempty"` NumWorkers int `json:"num_workers,omitempty"` PolicyId string `json:"policy_id,omitempty"` @@ -471,6 +476,7 @@ type ResourceJobNewCluster struct { SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` SparkVersion string `json:"spark_version"` SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` Autoscale *ResourceJobNewClusterAutoscale `json:"autoscale,omitempty"` AwsAttributes *ResourceJobNewClusterAwsAttributes `json:"aws_attributes,omitempty"` AzureAttributes *ResourceJobNewClusterAzureAttributes `json:"azure_attributes,omitempty"` @@ -548,6 +554,13 @@ type ResourceJobSparkSubmitTask struct { Parameters []string `json:"parameters,omitempty"` } +type ResourceJobTaskCleanRoomsNotebookTask struct { + CleanRoomName string `json:"clean_room_name"` + Etag string `json:"etag,omitempty"` + NotebookBaseParameters map[string]string `json:"notebook_base_parameters,omitempty"` + NotebookName string `json:"notebook_name"` +} + type ResourceJobTaskConditionTask struct { Left string `json:"left"` Op string `json:"op"` @@ -578,6 +591,13 @@ type ResourceJobTaskEmailNotifications struct { OnSuccess []string `json:"on_success,omitempty"` } +type ResourceJobTaskForEachTaskTaskCleanRoomsNotebookTask struct { + CleanRoomName string `json:"clean_room_name"` + Etag string `json:"etag,omitempty"` + NotebookBaseParameters map[string]string `json:"notebook_base_parameters,omitempty"` + NotebookName string `json:"notebook_name"` +} + type ResourceJobTaskForEachTaskTaskConditionTask struct { Left string `json:"left"` 
Op string `json:"op"` @@ -814,6 +834,8 @@ type ResourceJobTaskForEachTaskTaskNewCluster struct { EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` IdempotencyToken string `json:"idempotency_token,omitempty"` InstancePoolId string `json:"instance_pool_id,omitempty"` + IsSingleNode bool `json:"is_single_node,omitempty"` + Kind string `json:"kind,omitempty"` NodeTypeId string `json:"node_type_id,omitempty"` NumWorkers int `json:"num_workers,omitempty"` PolicyId string `json:"policy_id,omitempty"` @@ -823,6 +845,7 @@ type ResourceJobTaskForEachTaskTaskNewCluster struct { SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` SparkVersion string `json:"spark_version"` SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` Autoscale *ResourceJobTaskForEachTaskTaskNewClusterAutoscale `json:"autoscale,omitempty"` AwsAttributes *ResourceJobTaskForEachTaskTaskNewClusterAwsAttributes `json:"aws_attributes,omitempty"` AzureAttributes *ResourceJobTaskForEachTaskTaskNewClusterAzureAttributes `json:"azure_attributes,omitempty"` @@ -963,34 +986,35 @@ type ResourceJobTaskForEachTaskTaskWebhookNotifications struct { } type ResourceJobTaskForEachTaskTask struct { - Description string `json:"description,omitempty"` - DisableAutoOptimization bool `json:"disable_auto_optimization,omitempty"` - EnvironmentKey string `json:"environment_key,omitempty"` - ExistingClusterId string `json:"existing_cluster_id,omitempty"` - JobClusterKey string `json:"job_cluster_key,omitempty"` - MaxRetries int `json:"max_retries,omitempty"` - MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` - RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` - RunIf string `json:"run_if,omitempty"` - TaskKey string `json:"task_key"` - TimeoutSeconds int `json:"timeout_seconds,omitempty"` - ConditionTask *ResourceJobTaskForEachTaskTaskConditionTask `json:"condition_task,omitempty"` - DbtTask *ResourceJobTaskForEachTaskTaskDbtTask `json:"dbt_task,omitempty"` - DependsOn []ResourceJobTaskForEachTaskTaskDependsOn `json:"depends_on,omitempty"` - EmailNotifications *ResourceJobTaskForEachTaskTaskEmailNotifications `json:"email_notifications,omitempty"` - Health *ResourceJobTaskForEachTaskTaskHealth `json:"health,omitempty"` - Library []ResourceJobTaskForEachTaskTaskLibrary `json:"library,omitempty"` - NewCluster *ResourceJobTaskForEachTaskTaskNewCluster `json:"new_cluster,omitempty"` - NotebookTask *ResourceJobTaskForEachTaskTaskNotebookTask `json:"notebook_task,omitempty"` - NotificationSettings *ResourceJobTaskForEachTaskTaskNotificationSettings `json:"notification_settings,omitempty"` - PipelineTask *ResourceJobTaskForEachTaskTaskPipelineTask `json:"pipeline_task,omitempty"` - PythonWheelTask *ResourceJobTaskForEachTaskTaskPythonWheelTask `json:"python_wheel_task,omitempty"` - RunJobTask *ResourceJobTaskForEachTaskTaskRunJobTask `json:"run_job_task,omitempty"` - SparkJarTask *ResourceJobTaskForEachTaskTaskSparkJarTask `json:"spark_jar_task,omitempty"` - SparkPythonTask *ResourceJobTaskForEachTaskTaskSparkPythonTask `json:"spark_python_task,omitempty"` - SparkSubmitTask *ResourceJobTaskForEachTaskTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` - SqlTask *ResourceJobTaskForEachTaskTaskSqlTask `json:"sql_task,omitempty"` - WebhookNotifications *ResourceJobTaskForEachTaskTaskWebhookNotifications `json:"webhook_notifications,omitempty"` + Description string `json:"description,omitempty"` + DisableAutoOptimization bool 
`json:"disable_auto_optimization,omitempty"` + EnvironmentKey string `json:"environment_key,omitempty"` + ExistingClusterId string `json:"existing_cluster_id,omitempty"` + JobClusterKey string `json:"job_cluster_key,omitempty"` + MaxRetries int `json:"max_retries,omitempty"` + MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` + RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` + RunIf string `json:"run_if,omitempty"` + TaskKey string `json:"task_key"` + TimeoutSeconds int `json:"timeout_seconds,omitempty"` + CleanRoomsNotebookTask *ResourceJobTaskForEachTaskTaskCleanRoomsNotebookTask `json:"clean_rooms_notebook_task,omitempty"` + ConditionTask *ResourceJobTaskForEachTaskTaskConditionTask `json:"condition_task,omitempty"` + DbtTask *ResourceJobTaskForEachTaskTaskDbtTask `json:"dbt_task,omitempty"` + DependsOn []ResourceJobTaskForEachTaskTaskDependsOn `json:"depends_on,omitempty"` + EmailNotifications *ResourceJobTaskForEachTaskTaskEmailNotifications `json:"email_notifications,omitempty"` + Health *ResourceJobTaskForEachTaskTaskHealth `json:"health,omitempty"` + Library []ResourceJobTaskForEachTaskTaskLibrary `json:"library,omitempty"` + NewCluster *ResourceJobTaskForEachTaskTaskNewCluster `json:"new_cluster,omitempty"` + NotebookTask *ResourceJobTaskForEachTaskTaskNotebookTask `json:"notebook_task,omitempty"` + NotificationSettings *ResourceJobTaskForEachTaskTaskNotificationSettings `json:"notification_settings,omitempty"` + PipelineTask *ResourceJobTaskForEachTaskTaskPipelineTask `json:"pipeline_task,omitempty"` + PythonWheelTask *ResourceJobTaskForEachTaskTaskPythonWheelTask `json:"python_wheel_task,omitempty"` + RunJobTask *ResourceJobTaskForEachTaskTaskRunJobTask `json:"run_job_task,omitempty"` + SparkJarTask *ResourceJobTaskForEachTaskTaskSparkJarTask `json:"spark_jar_task,omitempty"` + SparkPythonTask *ResourceJobTaskForEachTaskTaskSparkPythonTask `json:"spark_python_task,omitempty"` + SparkSubmitTask *ResourceJobTaskForEachTaskTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` + SqlTask *ResourceJobTaskForEachTaskTaskSqlTask `json:"sql_task,omitempty"` + WebhookNotifications *ResourceJobTaskForEachTaskTaskWebhookNotifications `json:"webhook_notifications,omitempty"` } type ResourceJobTaskForEachTask struct { @@ -1205,6 +1229,8 @@ type ResourceJobTaskNewCluster struct { EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` IdempotencyToken string `json:"idempotency_token,omitempty"` InstancePoolId string `json:"instance_pool_id,omitempty"` + IsSingleNode bool `json:"is_single_node,omitempty"` + Kind string `json:"kind,omitempty"` NodeTypeId string `json:"node_type_id,omitempty"` NumWorkers int `json:"num_workers,omitempty"` PolicyId string `json:"policy_id,omitempty"` @@ -1214,6 +1240,7 @@ type ResourceJobTaskNewCluster struct { SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` SparkVersion string `json:"spark_version"` SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` Autoscale *ResourceJobTaskNewClusterAutoscale `json:"autoscale,omitempty"` AwsAttributes *ResourceJobTaskNewClusterAwsAttributes `json:"aws_attributes,omitempty"` AzureAttributes *ResourceJobTaskNewClusterAzureAttributes `json:"azure_attributes,omitempty"` @@ -1354,35 +1381,36 @@ type ResourceJobTaskWebhookNotifications struct { } type ResourceJobTask struct { - Description string `json:"description,omitempty"` - DisableAutoOptimization bool `json:"disable_auto_optimization,omitempty"` 
- EnvironmentKey string `json:"environment_key,omitempty"` - ExistingClusterId string `json:"existing_cluster_id,omitempty"` - JobClusterKey string `json:"job_cluster_key,omitempty"` - MaxRetries int `json:"max_retries,omitempty"` - MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` - RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` - RunIf string `json:"run_if,omitempty"` - TaskKey string `json:"task_key"` - TimeoutSeconds int `json:"timeout_seconds,omitempty"` - ConditionTask *ResourceJobTaskConditionTask `json:"condition_task,omitempty"` - DbtTask *ResourceJobTaskDbtTask `json:"dbt_task,omitempty"` - DependsOn []ResourceJobTaskDependsOn `json:"depends_on,omitempty"` - EmailNotifications *ResourceJobTaskEmailNotifications `json:"email_notifications,omitempty"` - ForEachTask *ResourceJobTaskForEachTask `json:"for_each_task,omitempty"` - Health *ResourceJobTaskHealth `json:"health,omitempty"` - Library []ResourceJobTaskLibrary `json:"library,omitempty"` - NewCluster *ResourceJobTaskNewCluster `json:"new_cluster,omitempty"` - NotebookTask *ResourceJobTaskNotebookTask `json:"notebook_task,omitempty"` - NotificationSettings *ResourceJobTaskNotificationSettings `json:"notification_settings,omitempty"` - PipelineTask *ResourceJobTaskPipelineTask `json:"pipeline_task,omitempty"` - PythonWheelTask *ResourceJobTaskPythonWheelTask `json:"python_wheel_task,omitempty"` - RunJobTask *ResourceJobTaskRunJobTask `json:"run_job_task,omitempty"` - SparkJarTask *ResourceJobTaskSparkJarTask `json:"spark_jar_task,omitempty"` - SparkPythonTask *ResourceJobTaskSparkPythonTask `json:"spark_python_task,omitempty"` - SparkSubmitTask *ResourceJobTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` - SqlTask *ResourceJobTaskSqlTask `json:"sql_task,omitempty"` - WebhookNotifications *ResourceJobTaskWebhookNotifications `json:"webhook_notifications,omitempty"` + Description string `json:"description,omitempty"` + DisableAutoOptimization bool `json:"disable_auto_optimization,omitempty"` + EnvironmentKey string `json:"environment_key,omitempty"` + ExistingClusterId string `json:"existing_cluster_id,omitempty"` + JobClusterKey string `json:"job_cluster_key,omitempty"` + MaxRetries int `json:"max_retries,omitempty"` + MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` + RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` + RunIf string `json:"run_if,omitempty"` + TaskKey string `json:"task_key"` + TimeoutSeconds int `json:"timeout_seconds,omitempty"` + CleanRoomsNotebookTask *ResourceJobTaskCleanRoomsNotebookTask `json:"clean_rooms_notebook_task,omitempty"` + ConditionTask *ResourceJobTaskConditionTask `json:"condition_task,omitempty"` + DbtTask *ResourceJobTaskDbtTask `json:"dbt_task,omitempty"` + DependsOn []ResourceJobTaskDependsOn `json:"depends_on,omitempty"` + EmailNotifications *ResourceJobTaskEmailNotifications `json:"email_notifications,omitempty"` + ForEachTask *ResourceJobTaskForEachTask `json:"for_each_task,omitempty"` + Health *ResourceJobTaskHealth `json:"health,omitempty"` + Library []ResourceJobTaskLibrary `json:"library,omitempty"` + NewCluster *ResourceJobTaskNewCluster `json:"new_cluster,omitempty"` + NotebookTask *ResourceJobTaskNotebookTask `json:"notebook_task,omitempty"` + NotificationSettings *ResourceJobTaskNotificationSettings `json:"notification_settings,omitempty"` + PipelineTask *ResourceJobTaskPipelineTask `json:"pipeline_task,omitempty"` + PythonWheelTask *ResourceJobTaskPythonWheelTask `json:"python_wheel_task,omitempty"` + RunJobTask 
*ResourceJobTaskRunJobTask `json:"run_job_task,omitempty"` + SparkJarTask *ResourceJobTaskSparkJarTask `json:"spark_jar_task,omitempty"` + SparkPythonTask *ResourceJobTaskSparkPythonTask `json:"spark_python_task,omitempty"` + SparkSubmitTask *ResourceJobTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` + SqlTask *ResourceJobTaskSqlTask `json:"sql_task,omitempty"` + WebhookNotifications *ResourceJobTaskWebhookNotifications `json:"webhook_notifications,omitempty"` } type ResourceJobTriggerFileArrival struct { diff --git a/bundle/internal/tf/schema/resource_permissions.go b/bundle/internal/tf/schema/resource_permissions.go index a3d05e6f2..7dfb84b5f 100644 --- a/bundle/internal/tf/schema/resource_permissions.go +++ b/bundle/internal/tf/schema/resource_permissions.go @@ -10,6 +10,7 @@ type ResourcePermissionsAccessControl struct { } type ResourcePermissions struct { + AppName string `json:"app_name,omitempty"` Authorization string `json:"authorization,omitempty"` ClusterId string `json:"cluster_id,omitempty"` ClusterPolicyId string `json:"cluster_policy_id,omitempty"` diff --git a/bundle/internal/tf/schema/resource_pipeline.go b/bundle/internal/tf/schema/resource_pipeline.go index 7238d24a8..ebdb85027 100644 --- a/bundle/internal/tf/schema/resource_pipeline.go +++ b/bundle/internal/tf/schema/resource_pipeline.go @@ -244,9 +244,9 @@ type ResourcePipelineNotification struct { } type ResourcePipelineRestartWindow struct { - DaysOfWeek string `json:"days_of_week,omitempty"` - StartHour int `json:"start_hour"` - TimeZoneId string `json:"time_zone_id,omitempty"` + DaysOfWeek []string `json:"days_of_week,omitempty"` + StartHour int `json:"start_hour"` + TimeZoneId string `json:"time_zone_id,omitempty"` } type ResourcePipelineTriggerCron struct { diff --git a/bundle/internal/tf/schema/resources.go b/bundle/internal/tf/schema/resources.go index ea5b618fd..b57c2711a 100644 --- a/bundle/internal/tf/schema/resources.go +++ b/bundle/internal/tf/schema/resources.go @@ -5,6 +5,7 @@ package schema type Resources struct { AccessControlRuleSet map[string]any `json:"databricks_access_control_rule_set,omitempty"` Alert map[string]any `json:"databricks_alert,omitempty"` + App map[string]any `json:"databricks_app,omitempty"` ArtifactAllowlist map[string]any `json:"databricks_artifact_allowlist,omitempty"` AutomaticClusterUpdateWorkspaceSetting map[string]any `json:"databricks_automatic_cluster_update_workspace_setting,omitempty"` AwsS3Mount map[string]any `json:"databricks_aws_s3_mount,omitempty"` @@ -18,6 +19,7 @@ type Resources struct { ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"` ComplianceSecurityProfileWorkspaceSetting map[string]any `json:"databricks_compliance_security_profile_workspace_setting,omitempty"` Connection map[string]any `json:"databricks_connection,omitempty"` + Credential map[string]any `json:"databricks_credential,omitempty"` CustomAppIntegration map[string]any `json:"databricks_custom_app_integration,omitempty"` Dashboard map[string]any `json:"databricks_dashboard,omitempty"` DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"` @@ -111,6 +113,7 @@ func NewResources() *Resources { return &Resources{ AccessControlRuleSet: make(map[string]any), Alert: make(map[string]any), + App: make(map[string]any), ArtifactAllowlist: make(map[string]any), AutomaticClusterUpdateWorkspaceSetting: make(map[string]any), AwsS3Mount: make(map[string]any), @@ -124,6 +127,7 @@ func NewResources() *Resources { ClusterPolicy: make(map[string]any), 
ComplianceSecurityProfileWorkspaceSetting: make(map[string]any), Connection: make(map[string]any), + Credential: make(map[string]any), CustomAppIntegration: make(map[string]any), Dashboard: make(map[string]any), DbfsFile: make(map[string]any), diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index 2cadb8090..1f89dc64d 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -21,7 +21,7 @@ type Root struct { const ProviderHost = "registry.terraform.io" const ProviderSource = "databricks/databricks" -const ProviderVersion = "1.59.0" +const ProviderVersion = "1.62.0" func NewRoot() *Root { return &Root{ diff --git a/bundle/libraries/expand_glob_references.go b/bundle/libraries/expand_glob_references.go index c71615e0e..bb1905045 100644 --- a/bundle/libraries/expand_glob_references.go +++ b/bundle/libraries/expand_glob_references.go @@ -11,8 +11,7 @@ import ( "github.com/databricks/cli/libs/dyn" ) -type expand struct { -} +type expand struct{} func matchError(p dyn.Path, l []dyn.Location, message string) diag.Diagnostic { return diag.Diagnostic{ @@ -189,7 +188,6 @@ func (e *expand) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { diags = diags.Extend(d) return dyn.V(output), nil }) - if err != nil { return dyn.InvalidValue, err } @@ -197,7 +195,6 @@ func (e *expand) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { return v, nil }) - if err != nil { diags = diags.Extend(diag.FromErr(err)) } diff --git a/bundle/libraries/filer.go b/bundle/libraries/filer.go index 4448ed325..e09c75e0e 100644 --- a/bundle/libraries/filer.go +++ b/bundle/libraries/filer.go @@ -24,7 +24,7 @@ func GetFilerForLibraries(ctx context.Context, b *bundle.Bundle) (filer.Filer, s switch { case IsVolumesPath(artifactPath): - return filerForVolume(ctx, b) + return filerForVolume(b) default: return filerForWorkspace(b) diff --git a/bundle/libraries/filer_test.go b/bundle/libraries/filer_test.go index 88ba152fc..c18da9726 100644 --- a/bundle/libraries/filer_test.go +++ b/bundle/libraries/filer_test.go @@ -7,10 +7,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/filer" - sdkconfig "github.com/databricks/databricks-sdk-go/config" - "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) @@ -39,11 +36,6 @@ func TestGetFilerForLibrariesValidUcVolume(t *testing.T) { }, } - m := mocks.NewMockWorkspaceClient(t) - m.WorkspaceClient.Config = &sdkconfig.Config{} - m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/main/my_schema/my_volume").Return(nil) - b.SetWorkpaceClient(m.WorkspaceClient) - client, uploadPath, diags := GetFilerForLibraries(context.Background(), b) require.NoError(t, diags.Error()) assert.Equal(t, "/Volumes/main/my_schema/my_volume/.internal", uploadPath) diff --git a/bundle/libraries/filer_volume.go b/bundle/libraries/filer_volume.go index aecf68db1..176f475c6 100644 --- a/bundle/libraries/filer_volume.go +++ b/bundle/libraries/filer_volume.go @@ -1,132 +1,16 @@ package libraries import ( - "context" - "errors" - "fmt" "path" - "strings" "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/diag" - "github.com/databricks/cli/libs/dyn" - "github.com/databricks/cli/libs/dyn/dynvar" "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go/apierr" ) -func 
extractVolumeFromPath(artifactPath string) (string, string, string, error) { - if !IsVolumesPath(artifactPath) { - return "", "", "", fmt.Errorf("expected artifact_path to start with /Volumes/, got %s", artifactPath) - } - - parts := strings.Split(artifactPath, "/") - volumeFormatErr := fmt.Errorf("expected UC volume path to be in the format /Volumes/<catalog>/<schema>/<volume>/..., got %s", artifactPath) - - // Incorrect format. - if len(parts) < 5 { - return "", "", "", volumeFormatErr - } - - catalogName := parts[2] - schemaName := parts[3] - volumeName := parts[4] - - // Incorrect format. - if catalogName == "" || schemaName == "" || volumeName == "" { - return "", "", "", volumeFormatErr - } - - return catalogName, schemaName, volumeName, nil -} - -// This function returns a filer for ".internal" folder inside the directory configured -// at `workspace.artifact_path`. -// This function also checks if the UC volume exists in the workspace and then: -// 1. If the UC volume exists in the workspace: -// Returns a filer for the UC volume. -// 2. If the UC volume does not exist in the workspace but is (with high confidence) defined in -// the bundle configuration: -// Returns an error and a warning that instructs the user to deploy the -// UC volume before using it in the artifact path. -// 3. If the UC volume does not exist in the workspace and is not defined in the bundle configuration: -// Returns an error. -func filerForVolume(ctx context.Context, b *bundle.Bundle) (filer.Filer, string, diag.Diagnostics) { - artifactPath := b.Config.Workspace.ArtifactPath +func filerForVolume(b *bundle.Bundle) (filer.Filer, string, diag.Diagnostics) { w := b.WorkspaceClient() - - catalogName, schemaName, volumeName, err := extractVolumeFromPath(artifactPath) - if err != nil { - return nil, "", diag.Diagnostics{ - { - Severity: diag.Error, - Summary: err.Error(), - Locations: b.Config.GetLocations("workspace.artifact_path"), - Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")}, - }, - } - } - - // Check if the UC volume exists in the workspace. - volumePath := fmt.Sprintf("/Volumes/%s/%s/%s", catalogName, schemaName, volumeName) - err = w.Files.GetDirectoryMetadataByDirectoryPath(ctx, volumePath) - - // If the volume exists already, directly return the filer for the path to - // upload the artifacts to. - if err == nil { - uploadPath := path.Join(artifactPath, InternalDirName) - f, err := filer.NewFilesClient(w, uploadPath) - return f, uploadPath, diag.FromErr(err) - } - - baseErr := diag.Diagnostic{ - Severity: diag.Error, - Summary: fmt.Sprintf("unable to determine if volume at %s exists: %s", volumePath, err), - Locations: b.Config.GetLocations("workspace.artifact_path"), - Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")}, - } - - if errors.Is(err, apierr.ErrNotFound) { - // Since the API returned a 404, the volume does not exist. - // Modify the error message to provide more context. - baseErr.Summary = fmt.Sprintf("volume %s does not exist: %s", volumePath, err) - - // If the volume is defined in the bundle, provide a more helpful error diagnostic, - // with more details and location information. - path, locations, ok := findVolumeInBundle(b, catalogName, schemaName, volumeName) - if !ok { - return nil, "", diag.Diagnostics{baseErr} - } - baseErr.Detail = `You are using a volume in your artifact_path that is managed by -this bundle but which has not been deployed yet. 
Please first deploy -the volume using 'bundle deploy' and then switch over to using it in -the artifact_path.` - baseErr.Paths = append(baseErr.Paths, path) - baseErr.Locations = append(baseErr.Locations, locations...) - } - - return nil, "", diag.Diagnostics{baseErr} -} - -func findVolumeInBundle(b *bundle.Bundle, catalogName, schemaName, volumeName string) (dyn.Path, []dyn.Location, bool) { - volumes := b.Config.Resources.Volumes - for k, v := range volumes { - if v.CatalogName != catalogName || v.Name != volumeName { - continue - } - // UC schemas can be defined in the bundle itself, and thus might be interpolated - // at runtime via the ${resources.schemas.} syntax. Thus we match the volume - // definition if the schema name is the same as the one in the bundle, or if the - // schema name is interpolated. - // We only have to check for ${resources.schemas...} references because any - // other valid reference (like ${var.foo}) would have been interpolated by this point. - p, ok := dynvar.PureReferenceToPath(v.SchemaName) - isSchemaDefinedInBundle := ok && p.HasPrefix(dyn.Path{dyn.Key("resources"), dyn.Key("schemas")}) - if v.SchemaName != schemaName && !isSchemaDefinedInBundle { - continue - } - pathString := fmt.Sprintf("resources.volumes.%s", k) - return dyn.MustPathFromString(pathString), b.Config.GetLocations(pathString), true - } - return nil, nil, false + uploadPath := path.Join(b.Config.Workspace.ArtifactPath, InternalDirName) + f, err := filer.NewFilesClient(w, uploadPath) + return f, uploadPath, diag.FromErr(err) } diff --git a/bundle/libraries/filer_volume_test.go b/bundle/libraries/filer_volume_test.go index 0d886824d..39bdc4135 100644 --- a/bundle/libraries/filer_volume_test.go +++ b/bundle/libraries/filer_volume_test.go @@ -1,275 +1,27 @@ package libraries import ( - "context" - "fmt" "path" "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/resources" - "github.com/databricks/cli/bundle/internal/bundletest" - "github.com/databricks/cli/libs/diag" - "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go/apierr" - sdkconfig "github.com/databricks/databricks-sdk-go/config" - "github.com/databricks/databricks-sdk-go/experimental/mocks" - "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) -func TestFindVolumeInBundle(t *testing.T) { - b := &bundle.Bundle{ - Config: config.Root{ - Resources: config.Resources{ - Volumes: map[string]*resources.Volume{ - "foo": { - CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{ - CatalogName: "main", - Name: "my_volume", - SchemaName: "my_schema", - }, - }, - }, - }, - }, - } - - bundletest.SetLocation(b, "resources.volumes.foo", []dyn.Location{ - { - File: "volume.yml", - Line: 1, - Column: 2, - }, - }) - - // volume is in DAB. 
- path, locations, ok := findVolumeInBundle(b, "main", "my_schema", "my_volume") - assert.True(t, ok) - assert.Equal(t, []dyn.Location{{ - File: "volume.yml", - Line: 1, - Column: 2, - }}, locations) - assert.Equal(t, dyn.MustPathFromString("resources.volumes.foo"), path) - - // wrong volume name - _, _, ok = findVolumeInBundle(b, "main", "my_schema", "doesnotexist") - assert.False(t, ok) - - // wrong schema name - _, _, ok = findVolumeInBundle(b, "main", "doesnotexist", "my_volume") - assert.False(t, ok) - - // wrong catalog name - _, _, ok = findVolumeInBundle(b, "doesnotexist", "my_schema", "my_volume") - assert.False(t, ok) - - // schema name is interpolated but does not have the right prefix. In this case - // we should not match the volume. - b.Config.Resources.Volumes["foo"].SchemaName = "${foo.bar.baz}" - _, _, ok = findVolumeInBundle(b, "main", "my_schema", "my_volume") - assert.False(t, ok) - - // schema name is interpolated. - b.Config.Resources.Volumes["foo"].SchemaName = "${resources.schemas.my_schema.name}" - path, locations, ok = findVolumeInBundle(b, "main", "valuedoesnotmatter", "my_volume") - assert.True(t, ok) - assert.Equal(t, []dyn.Location{{ - File: "volume.yml", - Line: 1, - Column: 2, - }}, locations) - assert.Equal(t, dyn.MustPathFromString("resources.volumes.foo"), path) -} - -func TestFilerForVolumeForErrorFromAPI(t *testing.T) { +func TestFilerForVolume(t *testing.T) { b := &bundle.Bundle{ Config: config.Root{ Workspace: config.Workspace{ - ArtifactPath: "/Volumes/main/my_schema/my_volume", + ArtifactPath: "/Volumes/main/my_schema/my_volume/abc", }, }, } - bundletest.SetLocation(b, "workspace.artifact_path", []dyn.Location{{File: "config.yml", Line: 1, Column: 2}}) - - m := mocks.NewMockWorkspaceClient(t) - m.WorkspaceClient.Config = &sdkconfig.Config{} - m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/main/my_schema/my_volume").Return(fmt.Errorf("error from API")) - b.SetWorkpaceClient(m.WorkspaceClient) - - _, _, diags := filerForVolume(context.Background(), b) - assert.Equal(t, diag.Diagnostics{ - { - Severity: diag.Error, - Summary: "unable to determine if volume at /Volumes/main/my_schema/my_volume exists: error from API", - Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}}, - Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")}, - }}, diags) -} - -func TestFilerForVolumeWithVolumeNotFound(t *testing.T) { - b := &bundle.Bundle{ - Config: config.Root{ - Workspace: config.Workspace{ - ArtifactPath: "/Volumes/main/my_schema/doesnotexist", - }, - }, - } - - bundletest.SetLocation(b, "workspace.artifact_path", []dyn.Location{{File: "config.yml", Line: 1, Column: 2}}) - - m := mocks.NewMockWorkspaceClient(t) - m.WorkspaceClient.Config = &sdkconfig.Config{} - m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/main/my_schema/doesnotexist").Return(apierr.NotFound("some error message")) - b.SetWorkpaceClient(m.WorkspaceClient) - - _, _, diags := filerForVolume(context.Background(), b) - assert.Equal(t, diag.Diagnostics{ - { - Severity: diag.Error, - Summary: "volume /Volumes/main/my_schema/doesnotexist does not exist: some error message", - Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}}, - Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")}, - }}, diags) -} - -func TestFilerForVolumeNotFoundAndInBundle(t *testing.T) { - b := &bundle.Bundle{ - Config: config.Root{ - Workspace: config.Workspace{ - ArtifactPath: 
"/Volumes/main/my_schema/my_volume", - }, - Resources: config.Resources{ - Volumes: map[string]*resources.Volume{ - "foo": { - CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{ - CatalogName: "main", - Name: "my_volume", - VolumeType: "MANAGED", - SchemaName: "my_schema", - }, - }, - }, - }, - }, - } - - bundletest.SetLocation(b, "workspace.artifact_path", []dyn.Location{{File: "config.yml", Line: 1, Column: 2}}) - bundletest.SetLocation(b, "resources.volumes.foo", []dyn.Location{{File: "volume.yml", Line: 1, Column: 2}}) - - m := mocks.NewMockWorkspaceClient(t) - m.WorkspaceClient.Config = &sdkconfig.Config{} - m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/main/my_schema/my_volume").Return(apierr.NotFound("error from API")) - b.SetWorkpaceClient(m.WorkspaceClient) - - _, _, diags := GetFilerForLibraries(context.Background(), b) - assert.Equal(t, diag.Diagnostics{ - { - Severity: diag.Error, - Summary: "volume /Volumes/main/my_schema/my_volume does not exist: error from API", - Locations: []dyn.Location{{"config.yml", 1, 2}, {"volume.yml", 1, 2}}, - Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path"), dyn.MustPathFromString("resources.volumes.foo")}, - Detail: `You are using a volume in your artifact_path that is managed by -this bundle but which has not been deployed yet. Please first deploy -the volume using 'bundle deploy' and then switch over to using it in -the artifact_path.`, - }, - }, diags) -} - -func invalidVolumePaths() []string { - return []string{ - "/Volumes/", - "/Volumes/main", - "/Volumes/main/", - "/Volumes/main//", - "/Volumes/main//my_schema", - "/Volumes/main/my_schema", - "/Volumes/main/my_schema/", - "/Volumes/main/my_schema//", - "/Volumes//my_schema/my_volume", - } -} - -func TestFilerForVolumeWithInvalidVolumePaths(t *testing.T) { - for _, p := range invalidVolumePaths() { - b := &bundle.Bundle{ - Config: config.Root{ - Workspace: config.Workspace{ - ArtifactPath: p, - }, - }, - } - - bundletest.SetLocation(b, "workspace.artifact_path", []dyn.Location{{File: "config.yml", Line: 1, Column: 2}}) - - _, _, diags := GetFilerForLibraries(context.Background(), b) - require.Equal(t, diags, diag.Diagnostics{{ - Severity: diag.Error, - Summary: fmt.Sprintf("expected UC volume path to be in the format /Volumes////..., got %s", p), - Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}}, - Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")}, - }}) - } -} - -func TestFilerForVolumeWithInvalidPrefix(t *testing.T) { - b := &bundle.Bundle{ - Config: config.Root{ - Workspace: config.Workspace{ - ArtifactPath: "/Volume/main/my_schema/my_volume", - }, - }, - } - - _, _, diags := filerForVolume(context.Background(), b) - require.EqualError(t, diags.Error(), "expected artifact_path to start with /Volumes/, got /Volume/main/my_schema/my_volume") -} - -func TestFilerForVolumeWithValidVolumePaths(t *testing.T) { - validPaths := []string{ - "/Volumes/main/my_schema/my_volume", - "/Volumes/main/my_schema/my_volume/", - "/Volumes/main/my_schema/my_volume/a/b/c", - "/Volumes/main/my_schema/my_volume/a/a/a", - } - - for _, p := range validPaths { - b := &bundle.Bundle{ - Config: config.Root{ - Workspace: config.Workspace{ - ArtifactPath: p, - }, - }, - } - - m := mocks.NewMockWorkspaceClient(t) - m.WorkspaceClient.Config = &sdkconfig.Config{} - m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/main/my_schema/my_volume").Return(nil) - 
b.SetWorkpaceClient(m.WorkspaceClient) - - client, uploadPath, diags := filerForVolume(context.Background(), b) - require.NoError(t, diags.Error()) - assert.Equal(t, path.Join(p, ".internal"), uploadPath) - assert.IsType(t, &filer.FilesClient{}, client) - } -} - -func TestExtractVolumeFromPath(t *testing.T) { - catalogName, schemaName, volumeName, err := extractVolumeFromPath("/Volumes/main/my_schema/my_volume") - require.NoError(t, err) - assert.Equal(t, "main", catalogName) - assert.Equal(t, "my_schema", schemaName) - assert.Equal(t, "my_volume", volumeName) - - for _, p := range invalidVolumePaths() { - _, _, _, err := extractVolumeFromPath(p) - assert.EqualError(t, err, fmt.Sprintf("expected UC volume path to be in the format /Volumes/<catalog>/<schema>/<volume>/..., got %s", p)) - } + client, uploadPath, diags := filerForVolume(b) + require.NoError(t, diags.Error()) + assert.Equal(t, path.Join("/Volumes/main/my_schema/my_volume/abc/.internal"), uploadPath) + assert.IsType(t, &filer.FilesClient{}, client) } diff --git a/bundle/libraries/helpers.go b/bundle/libraries/helpers.go index 2149e5885..5a1a9511c 100644 --- a/bundle/libraries/helpers.go +++ b/bundle/libraries/helpers.go @@ -1,7 +1,7 @@ package libraries import ( - "fmt" + "errors" "github.com/databricks/databricks-sdk-go/service/compute" ) @@ -20,5 +20,5 @@ func libraryPath(library *compute.Library) (string, error) { return library.Requirements, nil } - return "", fmt.Errorf("not supported library type") + return "", errors.New("not supported library type") } diff --git a/bundle/libraries/helpers_test.go b/bundle/libraries/helpers_test.go index 9d7e12ee5..754aa8f95 100644 --- a/bundle/libraries/helpers_test.go +++ b/bundle/libraries/helpers_test.go @@ -12,25 +12,25 @@ func TestLibraryPath(t *testing.T) { p, err := libraryPath(&compute.Library{Whl: path}) assert.Equal(t, path, p) - assert.Nil(t, err) + assert.NoError(t, err) p, err = libraryPath(&compute.Library{Jar: path}) assert.Equal(t, path, p) - assert.Nil(t, err) + assert.NoError(t, err) p, err = libraryPath(&compute.Library{Egg: path}) assert.Equal(t, path, p) - assert.Nil(t, err) + assert.NoError(t, err) p, err = libraryPath(&compute.Library{Requirements: path}) assert.Equal(t, path, p) - assert.Nil(t, err) + assert.NoError(t, err) p, err = libraryPath(&compute.Library{}) assert.Equal(t, "", p) - assert.NotNil(t, err) + assert.Error(t, err) p, err = libraryPath(&compute.Library{Pypi: &compute.PythonPyPiLibrary{Package: "pypipackage"}}) assert.Equal(t, "", p) - assert.NotNil(t, err) + assert.Error(t, err) } diff --git a/bundle/libraries/upload.go b/bundle/libraries/upload.go index 4b6f43701..a2162fb7b 100644 --- a/bundle/libraries/upload.go +++ b/bundle/libraries/upload.go @@ -81,7 +81,6 @@ func collectLocalLibraries(b *bundle.Bundle) (map[string][]configLocation, error return v, nil }) }) - if err != nil { return nil, err } @@ -119,7 +118,6 @@ func collectLocalLibraries(b *bundle.Bundle) (map[string][]configLocation, error return v, nil }) }) - if err != nil { return nil, err } @@ -175,7 +173,6 @@ func (u *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { return v, nil }) - if err != nil { diags = diags.Extend(diag.FromErr(err)) } diff --git a/bundle/libraries/upload_test.go b/bundle/libraries/upload_test.go index 493785bf5..44b194c56 100644 --- a/bundle/libraries/upload_test.go +++ b/bundle/libraries/upload_test.go @@ -11,8 +11,6 @@ import ( mockfiler "github.com/databricks/cli/internal/mocks/libs/filer" "github.com/databricks/cli/internal/testutil" 
"github.com/databricks/cli/libs/filer" - sdkconfig "github.com/databricks/databricks-sdk-go/config" - "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/mock" @@ -183,11 +181,6 @@ func TestArtifactUploadForVolumes(t *testing.T) { filer.CreateParentDirectories, ).Return(nil) - m := mocks.NewMockWorkspaceClient(t) - m.WorkspaceClient.Config = &sdkconfig.Config{} - m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/foo/bar/artifacts").Return(nil) - b.SetWorkpaceClient(m.WorkspaceClient) - diags := bundle.Apply(context.Background(), b, bundle.Seq(ExpandGlobReferences(), UploadWithClient(mockFiler))) require.NoError(t, diags.Error()) diff --git a/bundle/permissions/filter.go b/bundle/permissions/filter.go index 60264f6ea..6fa8d1374 100644 --- a/bundle/permissions/filter.go +++ b/bundle/permissions/filter.go @@ -56,7 +56,6 @@ func filter(currentUser string) dyn.WalkValueFunc { } return v, nil - } } diff --git a/bundle/permissions/filter_test.go b/bundle/permissions/filter_test.go index 121ce10dc..ef7167d75 100644 --- a/bundle/permissions/filter_test.go +++ b/bundle/permissions/filter_test.go @@ -90,7 +90,6 @@ func testFixture(userName string) *bundle.Bundle { }, }, } - } func TestFilterCurrentUser(t *testing.T) { @@ -100,32 +99,32 @@ func TestFilterCurrentUser(t *testing.T) { assert.NoError(t, diags.Error()) // Assert current user is filtered out. - assert.Equal(t, 2, len(b.Config.Resources.Jobs["job1"].Permissions)) + assert.Len(t, b.Config.Resources.Jobs["job1"].Permissions, 2) assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, robot) assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, bob) - assert.Equal(t, 2, len(b.Config.Resources.Jobs["job2"].Permissions)) + assert.Len(t, b.Config.Resources.Jobs["job2"].Permissions, 2) assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, robot) assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, bob) - assert.Equal(t, 2, len(b.Config.Resources.Pipelines["pipeline1"].Permissions)) + assert.Len(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, 2) assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, robot) assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, bob) - assert.Equal(t, 2, len(b.Config.Resources.Experiments["experiment1"].Permissions)) + assert.Len(t, b.Config.Resources.Experiments["experiment1"].Permissions, 2) assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, robot) assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, bob) - assert.Equal(t, 2, len(b.Config.Resources.Models["model1"].Permissions)) + assert.Len(t, b.Config.Resources.Models["model1"].Permissions, 2) assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, robot) assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, bob) - assert.Equal(t, 2, len(b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions)) + assert.Len(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, 2) assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, robot) assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, bob) // Assert there's no change to the grant. 
- assert.Equal(t, 1, len(b.Config.Resources.RegisteredModels["registered_model1"].Grants)) + assert.Len(t, b.Config.Resources.RegisteredModels["registered_model1"].Grants, 1) } func TestFilterCurrentServicePrincipal(t *testing.T) { @@ -135,32 +134,32 @@ func TestFilterCurrentServicePrincipal(t *testing.T) { assert.NoError(t, diags.Error()) // Assert current user is filtered out. - assert.Equal(t, 2, len(b.Config.Resources.Jobs["job1"].Permissions)) + assert.Len(t, b.Config.Resources.Jobs["job1"].Permissions, 2) assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, alice) assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, bob) - assert.Equal(t, 2, len(b.Config.Resources.Jobs["job2"].Permissions)) + assert.Len(t, b.Config.Resources.Jobs["job2"].Permissions, 2) assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, alice) assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, bob) - assert.Equal(t, 2, len(b.Config.Resources.Pipelines["pipeline1"].Permissions)) + assert.Len(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, 2) assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, alice) assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, bob) - assert.Equal(t, 2, len(b.Config.Resources.Experiments["experiment1"].Permissions)) + assert.Len(t, b.Config.Resources.Experiments["experiment1"].Permissions, 2) assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, alice) assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, bob) - assert.Equal(t, 2, len(b.Config.Resources.Models["model1"].Permissions)) + assert.Len(t, b.Config.Resources.Models["model1"].Permissions, 2) assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, alice) assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, bob) - assert.Equal(t, 2, len(b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions)) + assert.Len(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, 2) assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, alice) assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, bob) // Assert there's no change to the grant. 
- assert.Equal(t, 1, len(b.Config.Resources.RegisteredModels["registered_model1"].Grants)) + assert.Len(t, b.Config.Resources.RegisteredModels["registered_model1"].Grants, 1) } func TestFilterCurrentUserDoesNotErrorWhenNoResources(t *testing.T) { diff --git a/bundle/permissions/mutator.go b/bundle/permissions/mutator.go index bc1392d93..cd7cbf40c 100644 --- a/bundle/permissions/mutator.go +++ b/bundle/permissions/mutator.go @@ -7,43 +7,52 @@ import ( "strings" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" ) -const CAN_MANAGE = "CAN_MANAGE" -const CAN_VIEW = "CAN_VIEW" -const CAN_RUN = "CAN_RUN" +const ( + CAN_MANAGE = "CAN_MANAGE" + CAN_VIEW = "CAN_VIEW" + CAN_RUN = "CAN_RUN" +) -var allowedLevels = []string{CAN_MANAGE, CAN_VIEW, CAN_RUN} -var levelsMap = map[string](map[string]string){ - "jobs": { - CAN_MANAGE: "CAN_MANAGE", - CAN_VIEW: "CAN_VIEW", - CAN_RUN: "CAN_MANAGE_RUN", - }, - "pipelines": { - CAN_MANAGE: "CAN_MANAGE", - CAN_VIEW: "CAN_VIEW", - CAN_RUN: "CAN_RUN", - }, - "mlflow_experiments": { - CAN_MANAGE: "CAN_MANAGE", - CAN_VIEW: "CAN_READ", - }, - "mlflow_models": { - CAN_MANAGE: "CAN_MANAGE", - CAN_VIEW: "CAN_READ", - }, - "model_serving_endpoints": { - CAN_MANAGE: "CAN_MANAGE", - CAN_VIEW: "CAN_VIEW", - CAN_RUN: "CAN_QUERY", - }, - "dashboards": { - CAN_MANAGE: "CAN_MANAGE", - CAN_VIEW: "CAN_READ", - }, -} +var unsupportedResources = []string{"clusters", "volumes", "schemas", "quality_monitors", "registered_models"} + +var ( + allowedLevels = []string{CAN_MANAGE, CAN_VIEW, CAN_RUN} + levelsMap = map[string](map[string]string){ + "jobs": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_VIEW", + CAN_RUN: "CAN_MANAGE_RUN", + }, + "pipelines": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_VIEW", + CAN_RUN: "CAN_RUN", + }, + "experiments": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_READ", + }, + "models": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_READ", + }, + "model_serving_endpoints": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_VIEW", + CAN_RUN: "CAN_QUERY", + }, + "dashboards": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_READ", + }, + } +) type bundlePermissions struct{} @@ -57,11 +66,55 @@ func (m *bundlePermissions) Apply(ctx context.Context, b *bundle.Bundle) diag.Di return diag.FromErr(err) } - applyForJobs(ctx, b) - applyForPipelines(ctx, b) - applyForMlModels(ctx, b) - applyForMlExperiments(ctx, b) - applyForModelServiceEndpoints(ctx, b) + patterns := make(map[string]dyn.Pattern, 0) + for key := range levelsMap { + patterns[key] = dyn.NewPattern( + dyn.Key("resources"), + dyn.Key(key), + dyn.AnyKey(), + ) + } + + err = b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + for key, pattern := range patterns { + v, err = dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + var permissions []resources.Permission + pv, err := dyn.Get(v, "permissions") + // If the permissions field is not found, we set to an empty array + if err != nil { + pv = dyn.V([]dyn.Value{}) + } + + err = convert.ToTyped(&permissions, pv) + if err != nil { + return dyn.InvalidValue, fmt.Errorf("failed to convert permissions: %w", err) + } + + permissions = append(permissions, convertPermissions( + ctx, + b.Config.Permissions, + permissions, + key, + levelsMap[key], + )...) 
+ + pv, err = convert.FromTyped(permissions, dyn.NilValue) + if err != nil { + return dyn.InvalidValue, fmt.Errorf("failed to convert permissions: %w", err) + } + + return dyn.Set(v, "permissions", pv) + }) + if err != nil { + return dyn.InvalidValue, err + } + } + + return v, nil + }) + if err != nil { + return diag.FromErr(err) + } return nil } @@ -76,66 +129,6 @@ func validate(b *bundle.Bundle) error { return nil } -func applyForJobs(ctx context.Context, b *bundle.Bundle) { - for key, job := range b.Config.Resources.Jobs { - job.Permissions = append(job.Permissions, convert( - ctx, - b.Config.Permissions, - job.Permissions, - key, - levelsMap["jobs"], - )...) - } -} - -func applyForPipelines(ctx context.Context, b *bundle.Bundle) { - for key, pipeline := range b.Config.Resources.Pipelines { - pipeline.Permissions = append(pipeline.Permissions, convert( - ctx, - b.Config.Permissions, - pipeline.Permissions, - key, - levelsMap["pipelines"], - )...) - } -} - -func applyForMlExperiments(ctx context.Context, b *bundle.Bundle) { - for key, experiment := range b.Config.Resources.Experiments { - experiment.Permissions = append(experiment.Permissions, convert( - ctx, - b.Config.Permissions, - experiment.Permissions, - key, - levelsMap["mlflow_experiments"], - )...) - } -} - -func applyForMlModels(ctx context.Context, b *bundle.Bundle) { - for key, model := range b.Config.Resources.Models { - model.Permissions = append(model.Permissions, convert( - ctx, - b.Config.Permissions, - model.Permissions, - key, - levelsMap["mlflow_models"], - )...) - } -} - -func applyForModelServiceEndpoints(ctx context.Context, b *bundle.Bundle) { - for key, model := range b.Config.Resources.ModelServingEndpoints { - model.Permissions = append(model.Permissions, convert( - ctx, - b.Config.Permissions, - model.Permissions, - key, - levelsMap["model_serving_endpoints"], - )...) 
- } -} - func (m *bundlePermissions) Name() string { return "ApplyBundlePermissions" } diff --git a/bundle/permissions/mutator_test.go b/bundle/permissions/mutator_test.go index 1a177d902..15586e979 100644 --- a/bundle/permissions/mutator_test.go +++ b/bundle/permissions/mutator_test.go @@ -2,12 +2,15 @@ package permissions import ( "context" + "fmt" + "slices" "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -51,6 +54,10 @@ func TestApplyBundlePermissions(t *testing.T) { "endpoint_1": {}, "endpoint_2": {}, }, + Dashboards: map[string]*resources.Dashboard{ + "dashboard_1": {}, + "dashboard_2": {}, + }, }, }, } @@ -103,6 +110,10 @@ func TestApplyBundlePermissions(t *testing.T) { require.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint_2"].Permissions, resources.Permission{Level: "CAN_MANAGE", UserName: "TestUser"}) require.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint_2"].Permissions, resources.Permission{Level: "CAN_VIEW", GroupName: "TestGroup"}) require.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint_2"].Permissions, resources.Permission{Level: "CAN_QUERY", ServicePrincipalName: "TestServicePrincipal"}) + + require.Len(t, b.Config.Resources.Dashboards["dashboard_1"].Permissions, 2) + require.Contains(t, b.Config.Resources.Dashboards["dashboard_1"].Permissions, resources.Permission{Level: "CAN_MANAGE", UserName: "TestUser"}) + require.Contains(t, b.Config.Resources.Dashboards["dashboard_1"].Permissions, resources.Permission{Level: "CAN_READ", GroupName: "TestGroup"}) } func TestWarningOnOverlapPermission(t *testing.T) { @@ -146,5 +157,20 @@ func TestWarningOnOverlapPermission(t *testing.T) { require.Contains(t, b.Config.Resources.Jobs["job_2"].Permissions, resources.Permission{Level: "CAN_VIEW", UserName: "TestUser2"}) require.Contains(t, b.Config.Resources.Jobs["job_2"].Permissions, resources.Permission{Level: "CAN_MANAGE", UserName: "TestUser"}) require.Contains(t, b.Config.Resources.Jobs["job_2"].Permissions, resources.Permission{Level: "CAN_VIEW", GroupName: "TestGroup"}) - +} + +func TestAllResourcesExplicitlyDefinedForPermissionsSupport(t *testing.T) { + r := config.Resources{} + + for _, resource := range unsupportedResources { + _, ok := levelsMap[resource] + assert.False(t, ok, "Resource %s is defined in both levelsMap and unsupportedResources", resource) + } + + for _, resource := range r.AllResources() { + _, ok := levelsMap[resource.Description.PluralName] + if !slices.Contains(unsupportedResources, resource.Description.PluralName) && !ok { + assert.Fail(t, fmt.Sprintf("Resource %s is not explicitly defined in levelsMap or unsupportedResources", resource.Description.PluralName)) + } + } } diff --git a/bundle/permissions/permission_diagnostics_test.go b/bundle/permissions/permission_diagnostics_test.go index 7b0afefa0..6c55ab594 100644 --- a/bundle/permissions/permission_diagnostics_test.go +++ b/bundle/permissions/permission_diagnostics_test.go @@ -28,7 +28,7 @@ func TestPermissionDiagnosticsApplyFail(t *testing.T) { }) diags := permissions.PermissionDiagnostics().Apply(context.Background(), b) - require.Equal(t, diags[0].Severity, diag.Warning) + require.Equal(t, diag.Warning, diags[0].Severity) require.Contains(t, diags[0].Summary, "permissions section should include testuser@databricks.com or one of their 
groups with CAN_MANAGE permissions") } diff --git a/bundle/permissions/utils.go b/bundle/permissions/utils.go index 9072cd252..cf16ea9b2 100644 --- a/bundle/permissions/utils.go +++ b/bundle/permissions/utils.go @@ -7,7 +7,7 @@ import ( "github.com/databricks/cli/libs/diag" ) -func convert( +func convertPermissions( ctx context.Context, bundlePermissions []resources.Permission, resourcePermissions []resources.Permission, diff --git a/bundle/permissions/validate.go b/bundle/permissions/validate.go index f1a18f430..dee7326cf 100644 --- a/bundle/permissions/validate.go +++ b/bundle/permissions/validate.go @@ -9,8 +9,7 @@ import ( "github.com/databricks/cli/libs/diag" ) -type validateSharedRootPermissions struct { -} +type validateSharedRootPermissions struct{} func ValidateSharedRootPermissions() bundle.Mutator { return &validateSharedRootPermissions{} diff --git a/bundle/permissions/workspace_path_permissions.go b/bundle/permissions/workspace_path_permissions.go index a3b4424c1..225d2499e 100644 --- a/bundle/permissions/workspace_path_permissions.go +++ b/bundle/permissions/workspace_path_permissions.go @@ -52,7 +52,7 @@ func (p WorkspacePathPermissions) Compare(perms []resources.Permission) diag.Dia } // containsAll checks if permA contains all permissions in permB. -func containsAll(permA []resources.Permission, permB []resources.Permission) (bool, []resources.Permission) { +func containsAll(permA, permB []resources.Permission) (bool, []resources.Permission) { missing := make([]resources.Permission, 0) for _, a := range permA { found := false diff --git a/bundle/permissions/workspace_path_permissions_test.go b/bundle/permissions/workspace_path_permissions_test.go index 0bb00474c..eaefad906 100644 --- a/bundle/permissions/workspace_path_permissions_test.go +++ b/bundle/permissions/workspace_path_permissions_test.go @@ -117,5 +117,4 @@ func TestWorkspacePathPermissionsCompare(t *testing.T) { diags := wp.Compare(tc.perms) require.Equal(t, tc.expected, diags) } - } diff --git a/bundle/permissions/workspace_root.go b/bundle/permissions/workspace_root.go index de4f3a7fe..828b12f50 100644 --- a/bundle/permissions/workspace_root.go +++ b/bundle/permissions/workspace_root.go @@ -3,6 +3,7 @@ package permissions import ( "context" "fmt" + "strconv" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/libraries" @@ -12,8 +13,7 @@ import ( "golang.org/x/sync/errgroup" ) -type workspaceRootPermissions struct { -} +type workspaceRootPermissions struct{} func ApplyWorkspaceRootPermissions() bundle.Mutator { return &workspaceRootPermissions{} @@ -79,7 +79,7 @@ func setPermissions(ctx context.Context, w workspace.WorkspaceInterface, path st } _, err = w.SetPermissions(ctx, workspace.WorkspaceObjectPermissionsRequest{ - WorkspaceObjectId: fmt.Sprint(obj.ObjectId), + WorkspaceObjectId: strconv.FormatInt(obj.ObjectId, 10), WorkspaceObjectType: "directories", AccessControlList: permissions, }) diff --git a/bundle/phases/bind.go b/bundle/phases/bind.go index b2e92d6e2..c62c48aea 100644 --- a/bundle/phases/bind.go +++ b/bundle/phases/bind.go @@ -25,7 +25,7 @@ func Bind(opts *terraform.BindOptions) bundle.Mutator { ) } -func Unbind(resourceType string, resourceKey string) bundle.Mutator { +func Unbind(resourceType, resourceKey string) bundle.Mutator { return newPhase( "unbind", []bundle.Mutator{ diff --git a/bundle/phases/deploy.go b/bundle/phases/deploy.go index 2dc9623bd..16595611f 100644 --- a/bundle/phases/deploy.go +++ b/bundle/phases/deploy.go @@ -2,7 +2,7 @@ package phases import ( "context" 
- "fmt" + "errors" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/artifacts" @@ -54,7 +54,7 @@ func filterDeleteOrRecreateActions(changes []*tfjson.ResourceChange, resourceTyp func approvalForDeploy(ctx context.Context, b *bundle.Bundle) (bool, error) { tf := b.Terraform if tf == nil { - return false, fmt.Errorf("terraform not initialized") + return false, errors.New("terraform not initialized") } // read plan file @@ -111,7 +111,7 @@ is removed from the catalog, but the underlying files are not deleted:` } if !cmdio.IsPromptSupported(ctx) { - return false, fmt.Errorf("the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed") + return false, errors.New("the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed") } cmdio.LogString(ctx, "") diff --git a/bundle/phases/destroy.go b/bundle/phases/destroy.go index 6eb8b6a01..05a41dea2 100644 --- a/bundle/phases/destroy.go +++ b/bundle/phases/destroy.go @@ -3,7 +3,6 @@ package phases import ( "context" "errors" - "fmt" "net/http" "github.com/databricks/cli/bundle" @@ -34,7 +33,7 @@ func assertRootPathExists(ctx context.Context, b *bundle.Bundle) (bool, error) { func approvalForDestroy(ctx context.Context, b *bundle.Bundle) (bool, error) { tf := b.Terraform if tf == nil { - return false, fmt.Errorf("terraform not initialized") + return false, errors.New("terraform not initialized") } // read plan file @@ -63,7 +62,7 @@ func approvalForDestroy(ctx context.Context, b *bundle.Bundle) (bool, error) { } - cmdio.LogString(ctx, fmt.Sprintf("All files and directories at the following location will be deleted: %s", b.Config.Workspace.RootPath)) + cmdio.LogString(ctx, "All files and directories at the following location will be deleted: "+b.Config.Workspace.RootPath) cmdio.LogString(ctx, "") if b.AutoApprove { diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index 6fa0e5fed..f7b3cd608 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -41,6 +41,10 @@ func Initialize() bundle.Mutator { mutator.PopulateCurrentUser(), mutator.LoadGitDetails(), + // This mutator needs to be run before variable interpolation and defining default workspace paths + // because it affects how workspace variables are resolved. + mutator.ApplySourceLinkedDeploymentPreset(), + mutator.DefineDefaultWorkspaceRoot(), mutator.ExpandWorkspaceRoot(), mutator.DefineDefaultWorkspacePaths(), @@ -51,10 +55,13 @@ func Initialize() bundle.Mutator { mutator.RewriteWorkspacePrefix(), mutator.SetVariables(), + // Intentionally placed before ResolveVariableReferencesInLookup, ResolveResourceReferences, // ResolveVariableReferencesInComplexVariables and ResolveVariableReferences. 
// See what is expected in PythonMutatorPhaseInit doc pythonmutator.PythonMutator(pythonmutator.PythonMutatorPhaseInit), + pythonmutator.PythonMutator(pythonmutator.PythonMutatorPhaseLoadResources), + pythonmutator.PythonMutator(pythonmutator.PythonMutatorPhaseApplyMutators), mutator.ResolveVariableReferencesInLookup(), mutator.ResolveResourceReferences(), mutator.ResolveVariableReferencesInComplexVariables(), diff --git a/bundle/render/render_text_output.go b/bundle/render/render_text_output.go index 92dacb448..bacb85735 100644 --- a/bundle/render/render_text_output.go +++ b/bundle/render/render_text_output.go @@ -110,7 +110,7 @@ func renderSummaryHeaderTemplate(out io.Writer, b *bundle.Bundle) error { return renderSummaryHeaderTemplate(out, &bundle.Bundle{}) } - var currentUser = &iam.User{} + currentUser := &iam.User{} if b.Config.Workspace.CurrentUser != nil { if b.Config.Workspace.CurrentUser.User != nil { @@ -171,10 +171,16 @@ func RenderDiagnostics(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics, if err != nil { return fmt.Errorf("failed to render summary: %w", err) } - io.WriteString(out, "\n") + _, err = io.WriteString(out, "\n") + if err != nil { + return err + } } trailer := buildTrailer(diags) - io.WriteString(out, trailer) + _, err = io.WriteString(out, trailer) + if err != nil { + return err + } } return nil diff --git a/bundle/render/render_text_output_test.go b/bundle/render/render_text_output_test.go index 135d79dae..506756f70 100644 --- a/bundle/render/render_text_output_test.go +++ b/bundle/render/render_text_output_test.go @@ -376,7 +376,8 @@ func TestRenderDiagnostics(t *testing.T) { Locations: []dyn.Location{{ File: "foo.yaml", Line: 1, - Column: 2}}, + Column: 2, + }}, }, }, expected: "Error: failed to load xxx\n" + @@ -489,7 +490,8 @@ func TestRenderSummaryTemplate_nilBundle(t *testing.T) { err := renderSummaryHeaderTemplate(writer, nil) require.NoError(t, err) - io.WriteString(writer, buildTrailer(nil)) + _, err = io.WriteString(writer, buildTrailer(nil)) + require.NoError(t, err) assert.Equal(t, "Validation OK!\n", writer.String()) } diff --git a/bundle/root.go b/bundle/root.go index efc21e0ca..9ea9a8c13 100644 --- a/bundle/root.go +++ b/bundle/root.go @@ -2,6 +2,7 @@ package bundle import ( "context" + "errors" "fmt" "os" @@ -21,7 +22,7 @@ func getRootEnv(ctx context.Context) (string, error) { } stat, err := os.Stat(path) if err == nil && !stat.IsDir() { - err = fmt.Errorf("not a directory") + err = errors.New("not a directory") } if err != nil { return "", fmt.Errorf(`invalid bundle root %s="%s": %w`, env.RootVariable, path, err) diff --git a/bundle/root_test.go b/bundle/root_test.go index 99bf58a00..075242710 100644 --- a/bundle/root_test.go +++ b/bundle/root_test.go @@ -71,7 +71,7 @@ func TestRootLookup(t *testing.T) { defer f.Close() // Create directory tree. - err = os.MkdirAll("./a/b/c", 0755) + err = os.MkdirAll("./a/b/c", 0o755) require.NoError(t, err) // It should find the project root from $PWD. 
diff --git a/bundle/run/job.go b/bundle/run/job.go index 340af961c..2489ca619 100644 --- a/bundle/run/job.go +++ b/bundle/run/job.go @@ -3,6 +3,7 @@ package run import ( "context" "encoding/json" + "errors" "fmt" "strconv" "time" @@ -143,7 +144,7 @@ func logProgressCallback(ctx context.Context, progressLogger *cmdio.Logger) func progressLogger.Log(event) // log progress events in using the default logger - log.Infof(ctx, event.String()) + log.Info(ctx, event.String()) } } @@ -181,13 +182,13 @@ func (r *jobRunner) Run(ctx context.Context, opts *Options) (output.RunOutput, e // callback to log progress events. Called on every poll request progressLogger, ok := cmdio.FromContext(ctx) if !ok { - return nil, fmt.Errorf("no progress logger found") + return nil, errors.New("no progress logger found") } logProgress := logProgressCallback(ctx, progressLogger) waiter, err := w.Jobs.RunNow(ctx, *req) if err != nil { - return nil, fmt.Errorf("cannot start job") + return nil, errors.New("cannot start job") } if opts.NoWait { @@ -203,7 +204,7 @@ func (r *jobRunner) Run(ctx context.Context, opts *Options) (output.RunOutput, e logDebug(r) logProgress(r) }).GetWithTimeout(jobRunTimeout) - if err != nil && runId != nil { + if err != nil { r.logFailedTasks(ctx, *runId) } if err != nil { @@ -266,7 +267,7 @@ func (r *jobRunner) convertPythonParams(opts *Options) error { if len(opts.Job.pythonParams) > 0 { if _, ok := opts.Job.notebookParams["__python_params"]; ok { - return fmt.Errorf("can't use __python_params as notebook param, the name is reserved for internal use") + return errors.New("can't use __python_params as notebook param, the name is reserved for internal use") } p, err := json.Marshal(opts.Job.pythonParams) if err != nil { @@ -289,7 +290,6 @@ func (r *jobRunner) Cancel(ctx context.Context) error { ActiveOnly: true, JobId: jobID, }) - if err != nil { return err } diff --git a/bundle/run/job_args.go b/bundle/run/job_args.go index 85cf96efb..b1596bbb0 100644 --- a/bundle/run/job_args.go +++ b/bundle/run/job_args.go @@ -131,7 +131,7 @@ func (r *jobRunner) posArgsHandler() argsHandler { } // Handle task parameters otherwise. - var seen = make(map[jobTaskType]bool) + seen := make(map[jobTaskType]bool) for _, t := range job.Tasks { if t.NotebookTask != nil { seen[jobTaskTypeNotebook] = true diff --git a/bundle/run/job_options.go b/bundle/run/job_options.go index c359e79eb..7db8e72cd 100644 --- a/bundle/run/job_options.go +++ b/bundle/run/job_options.go @@ -1,7 +1,7 @@ package run import ( - "fmt" + "errors" "strconv" "github.com/databricks/cli/bundle/config/resources" @@ -60,16 +60,16 @@ func (o *JobOptions) hasJobParametersConfigured() bool { // Validate returns if the combination of options is valid. func (o *JobOptions) Validate(job *resources.Job) error { if job == nil { - return fmt.Errorf("job not defined") + return errors.New("job not defined") } // Ensure mutual exclusion on job parameters and task parameters. 
hasJobParams := len(job.Parameters) > 0 if hasJobParams && o.hasTaskParametersConfigured() { - return fmt.Errorf("the job to run defines job parameters; specifying task parameters is not allowed") + return errors.New("the job to run defines job parameters; specifying task parameters is not allowed") } if !hasJobParams && o.hasJobParametersConfigured() { - return fmt.Errorf("the job to run does not define job parameters; specifying job parameters is not allowed") + return errors.New("the job to run does not define job parameters; specifying job parameters is not allowed") } return nil @@ -80,7 +80,7 @@ func (o *JobOptions) validatePipelineParams() (*jobs.PipelineParams, error) { return nil, nil } - var defaultErr = fmt.Errorf("job run argument --pipeline-params only supports `full_refresh=<bool>`") + defaultErr := errors.New("job run argument --pipeline-params only supports `full_refresh=<bool>`") v, ok := o.pipelineParams["full_refresh"] if !ok { return nil, defaultErr diff --git a/bundle/run/job_test.go b/bundle/run/job_test.go index 369c546aa..72aecc887 100644 --- a/bundle/run/job_test.go +++ b/bundle/run/job_test.go @@ -42,7 +42,8 @@ func TestConvertPythonParams(t *testing.T) { opts := &Options{ Job: JobOptions{}, } - runner.convertPythonParams(opts) + err := runner.convertPythonParams(opts) + require.NoError(t, err) require.NotContains(t, opts.Job.notebookParams, "__python_params") opts = &Options{ @@ -50,9 +51,10 @@ func TestConvertPythonParams(t *testing.T) { pythonParams: []string{"param1", "param2", "param3"}, }, } - runner.convertPythonParams(opts) + err = runner.convertPythonParams(opts) + require.NoError(t, err) require.Contains(t, opts.Job.notebookParams, "__python_params") - require.Equal(t, opts.Job.notebookParams["__python_params"], `["param1","param2","param3"]`) + require.Equal(t, `["param1","param2","param3"]`, opts.Job.notebookParams["__python_params"]) } func TestJobRunnerCancel(t *testing.T) { @@ -158,7 +160,7 @@ func TestJobRunnerRestart(t *testing.T) { m := mocks.NewMockWorkspaceClient(t) b.SetWorkpaceClient(m.WorkspaceClient) ctx := context.Background() - ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "")) + ctx = cmdio.InContext(ctx, cmdio.NewIO(ctx, flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "")) ctx = cmdio.NewContext(ctx, cmdio.NewLogger(flags.ModeAppend)) jobApi := m.GetMockJobsAPI() @@ -229,7 +231,7 @@ func TestJobRunnerRestartForContinuousUnpausedJobs(t *testing.T) { m := mocks.NewMockWorkspaceClient(t) b.SetWorkpaceClient(m.WorkspaceClient) ctx := context.Background() - ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) + ctx = cmdio.InContext(ctx, cmdio.NewIO(ctx, flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) ctx = cmdio.NewContext(ctx, cmdio.NewLogger(flags.ModeAppend)) jobApi := m.GetMockJobsAPI() diff --git a/bundle/run/output/job.go b/bundle/run/output/job.go index 6199ac2f7..2ac974cd5 100644 --- a/bundle/run/output/job.go +++ b/bundle/run/output/job.go @@ -47,7 +47,7 @@ func (out *JobOutput) String() (string, error) { } result.WriteString("=======\n") result.WriteString(fmt.Sprintf("Task %s:\n", v.TaskKey)) - result.WriteString(fmt.Sprintf("%s\n", taskString)) + result.WriteString(taskString + "\n") } return result.String(), nil } diff --git a/bundle/run/output/task.go b/bundle/run/output/task.go index 402e4d66a..53b989e88 100644 --- a/bundle/run/output/task.go +++ 
b/bundle/run/output/task.go @@ -2,18 +2,19 @@ package output import ( "encoding/json" - "fmt" "github.com/databricks/databricks-sdk-go/service/jobs" ) -type NotebookOutput jobs.NotebookOutput -type DbtOutput jobs.DbtOutput -type SqlOutput jobs.SqlOutput -type LogsOutput struct { - Logs string `json:"logs"` - LogsTruncated bool `json:"logs_truncated"` -} +type ( + NotebookOutput jobs.NotebookOutput + DbtOutput jobs.DbtOutput + SqlOutput jobs.SqlOutput + LogsOutput struct { + Logs string `json:"logs"` + LogsTruncated bool `json:"logs_truncated"` + } +) func structToString(val any) (string, error) { b, err := json.MarshalIndent(val, "", " ") @@ -25,7 +26,7 @@ func structToString(val any) (string, error) { func (out *NotebookOutput) String() (string, error) { if out.Truncated { - return fmt.Sprintf("%s\n[truncated...]\n", out.Result), nil + return out.Result + "\n[truncated...]\n", nil } return out.Result, nil } @@ -40,7 +41,7 @@ func (out *DbtOutput) String() (string, error) { // JSON is used because it's a convenient representation. // If user needs machine parsable output, they can use the --output json // flag - return fmt.Sprintf("Dbt Task Output:\n%s", outputString), nil + return "Dbt Task Output:\n" + outputString, nil } func (out *SqlOutput) String() (string, error) { @@ -53,12 +54,12 @@ func (out *SqlOutput) String() (string, error) { // JSON is used because it's a convenient representation. // If user needs machine parsable output, they can use the --output json // flag - return fmt.Sprintf("SQL Task Output:\n%s", outputString), nil + return "SQL Task Output:\n" + outputString, nil } func (out *LogsOutput) String() (string, error) { if out.LogsTruncated { - return fmt.Sprintf("%s\n[truncated...]\n", out.Logs), nil + return out.Logs + "\n[truncated...]\n", nil } return out.Logs, nil } diff --git a/bundle/run/pipeline.go b/bundle/run/pipeline.go index ffe012843..bdcf0f142 100644 --- a/bundle/run/pipeline.go +++ b/bundle/run/pipeline.go @@ -2,6 +2,7 @@ package run import ( "context" + "errors" "fmt" "time" @@ -17,7 +18,7 @@ import ( func filterEventsByUpdateId(events []pipelines.PipelineEvent, updateId string) []pipelines.PipelineEvent { result := []pipelines.PipelineEvent{} - for i := 0; i < len(events); i++ { + for i := range events { if events[i].Origin.UpdateId == updateId { result = append(result, events[i]) } @@ -32,16 +33,16 @@ func (r *pipelineRunner) logEvent(ctx context.Context, event pipelines.PipelineE } if event.Error != nil && len(event.Error.Exceptions) > 0 { logString += "trace for most recent exception: \n" - for i := 0; i < len(event.Error.Exceptions); i++ { - logString += fmt.Sprintf("%s\n", event.Error.Exceptions[i].Message) + for i := range len(event.Error.Exceptions) { + logString += event.Error.Exceptions[i].Message + "\n" } } if logString != "" { - log.Errorf(ctx, fmt.Sprintf("[%s] %s", event.EventType, logString)) + log.Errorf(ctx, "[%s] %s", event.EventType, logString) } } -func (r *pipelineRunner) logErrorEvent(ctx context.Context, pipelineId string, updateId string) error { +func (r *pipelineRunner) logErrorEvent(ctx context.Context, pipelineId, updateId string) error { w := r.bundle.WorkspaceClient() // Note: For a 100 percent correct and complete solution we should use the @@ -85,16 +86,11 @@ func (r *pipelineRunner) Name() string { } func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (output.RunOutput, error) { - var pipelineID = r.pipeline.ID + pipelineID := r.pipeline.ID // Include resource key in logger. 
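// Illustrative sketch, not part of the diff: the loop rewrites in this file use
// Go 1.22+ range forms (range over a slice index and range over an int) instead
// of classic counted loops. A minimal standalone comparison (joinMessages is a
// hypothetical helper, not in this repo):
package example

import "strings"

func joinMessages(messages []string, padding int) string {
	var b strings.Builder
	// Before: for i := 0; i < len(messages); i++ { ... }
	for i := range messages { // iterates over indices 0..len(messages)-1
		b.WriteString(messages[i])
		b.WriteString("\n")
	}
	for range padding { // Go 1.22+: runs exactly `padding` times
		b.WriteString("-")
	}
	return b.String()
}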
ctx = log.NewContext(ctx, log.GetLogger(ctx).With("resource", r.Key())) w := r.bundle.WorkspaceClient() - _, err := w.Pipelines.GetByPipelineId(ctx, pipelineID) - if err != nil { - log.Warnf(ctx, "Cannot get pipeline: %s", err) - return nil, err - } req, err := opts.Pipeline.toPayload(r.pipeline, pipelineID) if err != nil { @@ -112,7 +108,7 @@ func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (output.RunOutp updateTracker := progress.NewUpdateTracker(pipelineID, updateID, w) progressLogger, ok := cmdio.FromContext(ctx) if !ok { - return nil, fmt.Errorf("no progress logger found") + return nil, errors.New("no progress logger found") } // Log the pipeline update URL as soon as it is available. @@ -132,7 +128,7 @@ func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (output.RunOutp } for _, event := range events { progressLogger.Log(&event) - log.Infof(ctx, event.String()) + log.Info(ctx, event.String()) } update, err := w.Pipelines.GetUpdateByPipelineIdAndUpdateId(ctx, pipelineID, updateID) @@ -149,7 +145,7 @@ func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (output.RunOutp if state == pipelines.UpdateInfoStateCanceled { log.Infof(ctx, "Update was cancelled!") - return nil, fmt.Errorf("update cancelled") + return nil, errors.New("update cancelled") } if state == pipelines.UpdateInfoStateFailed { log.Infof(ctx, "Update has failed!") @@ -157,7 +153,7 @@ func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (output.RunOutp if err != nil { return nil, err } - return nil, fmt.Errorf("update failed") + return nil, errors.New("update failed") } if state == pipelines.UpdateInfoStateCompleted { log.Infof(ctx, "Update has completed successfully!") @@ -173,7 +169,6 @@ func (r *pipelineRunner) Cancel(ctx context.Context) error { wait, err := w.Pipelines.Stop(ctx, pipelines.StopRequest{ PipelineId: r.pipeline.ID, }) - if err != nil { return err } diff --git a/bundle/run/pipeline_test.go b/bundle/run/pipeline_test.go index e4608061c..bfa0c5846 100644 --- a/bundle/run/pipeline_test.go +++ b/bundle/run/pipeline_test.go @@ -76,7 +76,7 @@ func TestPipelineRunnerRestart(t *testing.T) { } b.SetWorkpaceClient(m.WorkspaceClient) ctx := context.Background() - ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) + ctx = cmdio.InContext(ctx, cmdio.NewIO(ctx, flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) ctx = cmdio.NewContext(ctx, cmdio.NewLogger(flags.ModeAppend)) mockWait := &pipelines.WaitGetPipelineIdle[struct{}]{ @@ -90,8 +90,6 @@ func TestPipelineRunnerRestart(t *testing.T) { PipelineId: "123", }).Return(mockWait, nil) - pipelineApi.EXPECT().GetByPipelineId(mock.Anything, "123").Return(&pipelines.GetPipelineResponse{}, nil) - // Mock runner starting a new update pipelineApi.EXPECT().StartUpdate(mock.Anything, pipelines.StartUpdate{ PipelineId: "123", diff --git a/bundle/run/progress/pipeline.go b/bundle/run/progress/pipeline.go index 4a256e76c..ce92c4cde 100644 --- a/bundle/run/progress/pipeline.go +++ b/bundle/run/progress/pipeline.go @@ -33,7 +33,7 @@ func (event *ProgressEvent) String() string { // construct error string if level=`Error` if event.Level == pipelines.EventLevelError && event.Error != nil { for _, exception := range event.Error.Exceptions { - result.WriteString(fmt.Sprintf("\n%s", exception.Message)) + result.WriteString("\n" + exception.Message) } } return result.String() @@ -51,7 +51,7 @@ type UpdateTracker struct { w 
*databricks.WorkspaceClient } -func NewUpdateTracker(pipelineId string, updateId string, w *databricks.WorkspaceClient) *UpdateTracker { +func NewUpdateTracker(pipelineId, updateId string, w *databricks.WorkspaceClient) *UpdateTracker { return &UpdateTracker{ w: w, PipelineId: pipelineId, diff --git a/bundle/schema/embed_test.go b/bundle/schema/embed_test.go index e4b45baa5..59f1458cb 100644 --- a/bundle/schema/embed_test.go +++ b/bundle/schema/embed_test.go @@ -41,33 +41,23 @@ func TestJsonSchema(t *testing.T) { resourceJob := walk(s.Definitions, "github.com", "databricks", "cli", "bundle", "config", "resources.Job") fields := []string{"name", "continuous", "tasks", "trigger"} for _, field := range fields { - assert.NotEmpty(t, resourceJob.AnyOf[0].Properties[field].Description) + assert.NotEmpty(t, resourceJob.OneOf[0].Properties[field].Description) } // Assert descriptions were also loaded for a job task definition. jobTask := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "jobs.Task") fields = []string{"notebook_task", "spark_jar_task", "spark_python_task", "spark_submit_task", "description", "depends_on", "environment_key", "for_each_task", "existing_cluster_id"} for _, field := range fields { - assert.NotEmpty(t, jobTask.AnyOf[0].Properties[field].Description) + assert.NotEmpty(t, jobTask.OneOf[0].Properties[field].Description) } // Assert descriptions are loaded for pipelines pipeline := walk(s.Definitions, "github.com", "databricks", "cli", "bundle", "config", "resources.Pipeline") fields = []string{"name", "catalog", "clusters", "channel", "continuous", "development"} for _, field := range fields { - assert.NotEmpty(t, pipeline.AnyOf[0].Properties[field].Description) + assert.NotEmpty(t, pipeline.OneOf[0].Properties[field].Description) } - // Assert enum values are loaded - schedule := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "pipelines.RestartWindow") - assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "MONDAY") - assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "TUESDAY") - assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "WEDNESDAY") - assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "THURSDAY") - assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "FRIDAY") - assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "SATURDAY") - assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "SUNDAY") - providers := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "jobs.GitProvider") assert.Contains(t, providers.Enum, "gitHub") assert.Contains(t, providers.Enum, "bitbucketCloud") diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index f791b8440..2f78ffcca 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -1,7 +1,7 @@ { "$defs": { "bool": { - "anyOf": [ + "oneOf": [ { "type": "boolean" }, @@ -28,7 +28,7 @@ ] }, "float64": { - "anyOf": [ + "oneOf": [ { "type": "number" }, @@ -60,7 +60,7 @@ "bundle": { "config": { "resources.Cluster": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -130,6 +130,13 @@ "description": "The optional ID of the instance pool to which the cluster belongs.", "$ref": "#/$defs/string" }, + "is_single_node": { + "description": "This field can only be used with `kind`.\n\nWhen set to true, Databricks will automatically set single node related `custom_tags`, 
`spark_conf`, and `num_workers`\n", + "$ref": "#/$defs/bool" + }, + "kind": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.Kind" + }, "node_type_id": { "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.\n", "$ref": "#/$defs/string" @@ -168,6 +175,10 @@ "description": "SSH public key contents that will be added to each Spark node in this cluster. The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.", "$ref": "#/$defs/slice/string" }, + "use_ml_runtime": { + "description": "This field can only be used with `kind`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.\n", + "$ref": "#/$defs/bool" + }, "workload_type": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.WorkloadType" } @@ -181,7 +192,7 @@ ] }, "resources.Dashboard": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -244,14 +255,16 @@ ] }, "resources.Grant": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "principal": { + "description": "The name of the principal that will be granted privileges", "$ref": "#/$defs/string" }, "privileges": { + "description": "The privileges to grant to the specified entity", "$ref": "#/$defs/slice/string" } }, @@ -268,7 +281,7 @@ ] }, "resources.Job": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -363,7 +376,7 @@ ] }, "resources.MlflowExperiment": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -408,7 +421,7 @@ ] }, "resources.MlflowModel": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -453,7 +466,7 @@ ] }, "resources.ModelServingEndpoint": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -498,20 +511,24 @@ ] }, "resources.Permission": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "group_name": { + "description": "The name of the group that has the permission set in level.", "$ref": "#/$defs/string" }, "level": { + "description": "The allowed permission for user, group, service principal defined for this permission.", "$ref": "#/$defs/string" }, "service_principal_name": { + "description": "The name of the service principal that has the permission set in level.", "$ref": "#/$defs/string" }, "user_name": { + "description": "The name of the user that has the permission set in level.", "$ref": "#/$defs/string" } }, @@ -527,7 +544,7 @@ ] }, "resources.Pipeline": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -636,7 +653,7 @@ ] }, "resources.QualityMonitor": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -710,7 +727,7 @@ ] }, "resources.RegisteredModel": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -752,7 +769,7 @@ ] }, "resources.Schema": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -792,7 +809,7 @@ ] }, "resources.Volume": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -837,7 +854,7 @@ ] }, "variable.Lookup": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -895,12 +912,15 @@ "$ref": "#/$defs/interface" }, "description": { + "description": "The 
description of the variable.", "$ref": "#/$defs/string" }, "lookup": { + "description": "The name of the alert, cluster_policy, cluster, dashboard, instance_pool, job, metastore, pipeline, query, service_principal, or warehouse object for which to retrieve an ID.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config/variable.Lookup" }, "type": { + "description": "The type of the variable.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config/variable.VariableType" } }, @@ -916,12 +936,16 @@ "$ref": "#/$defs/interface" }, "description": { + "description": "The description of the variable", "$ref": "#/$defs/string" }, "lookup": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config/variable.Lookup" + "description": "The name of the alert, cluster_policy, cluster, dashboard, instance_pool, job, metastore, pipeline, query, service_principal, or warehouse object for which to retrieve an ID.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config/variable.Lookup", + "markdownDescription": "The name of the `alert`, `cluster_policy`, `cluster`, `dashboard`, `instance_pool`, `job`, `metastore`, `pipeline`, `query`, `service_principal`, or `warehouse` object for which to retrieve an ID.\"" }, "type": { + "description": "The type of the variable.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config/variable.VariableType" } }, @@ -932,24 +956,31 @@ } }, "config.Artifact": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "build": { + "description": "An optional set of non-default build commands that you want to run locally before deployment.\n\nFor Python wheel builds, the Databricks CLI assumes that it can find a local install of the Python wheel package to run builds, and it runs the command python setup.py bdist_wheel by default during each bundle deployment.\n\nTo specify multiple build commands, separate each command with double-ampersand (\u0026\u0026) characters.", "$ref": "#/$defs/string" }, "executable": { + "description": "The executable type.", "$ref": "#/$defs/github.com/databricks/cli/libs/exec.ExecutableType" }, "files": { - "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config.ArtifactFile" + "description": "The source files for the artifact.", + "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config.ArtifactFile", + "markdownDescription": "The source files for the artifact, defined as an [artifact_file](https://docs.databricks.com/dev-tools/bundles/reference.html#artifact_file)." }, "path": { + "description": "The location where the built artifact will be saved.", "$ref": "#/$defs/string" }, "type": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.ArtifactType" + "description": "The type of the artifact.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.ArtifactType", + "markdownDescription": "The type of the artifact. Valid values are `wheel` or `jar`" } }, "additionalProperties": false, @@ -964,11 +995,12 @@ ] }, "config.ArtifactFile": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "source": { + "description": "The path of the files used to build the artifact.", "$ref": "#/$defs/string" } }, @@ -987,26 +1019,35 @@ "type": "string" }, "config.Bundle": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "cluster_id": { - "$ref": "#/$defs/string" + "description": "The ID of a cluster to use to run the bundle.", + "$ref": "#/$defs/string", + "markdownDescription": "The ID of a cluster to use to run the bundle. 
See [cluster_id](https://docs.databricks.com/dev-tools/bundles/settings.html#cluster_id)." }, "compute_id": { "$ref": "#/$defs/string" }, "databricks_cli_version": { - "$ref": "#/$defs/string" + "description": "The Databricks CLI version to use for the bundle.", + "$ref": "#/$defs/string", + "markdownDescription": "The Databricks CLI version to use for the bundle. See [databricks_cli_version](https://docs.databricks.com/dev-tools/bundles/settings.html#databricks_cli_version)." }, "deployment": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Deployment" + "description": "The definition of the bundle deployment", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Deployment", + "markdownDescription": "The definition of the bundle deployment. For supported attributes, see [deployment](https://docs.databricks.com/dev-tools/bundles/reference.html#deployment) and [link](https://docs.databricks.com/dev-tools/bundles/deployment-modes.html)." }, "git": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Git" + "description": "The Git version control details that are associated with your bundle.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Git", + "markdownDescription": "The Git version control details that are associated with your bundle. For supported attributes, see [git](https://docs.databricks.com/dev-tools/bundles/reference.html#git) and [git](https://docs.databricks.com/dev-tools/bundles/settings.html#git)." }, "name": { + "description": "The name of the bundle.", "$ref": "#/$defs/string" }, "uuid": { @@ -1028,15 +1069,18 @@ "type": "string" }, "config.Deployment": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "fail_on_active_runs": { + "description": "Whether to fail on active runs. If this is set to true a deployment that is running can be interrupted.", "$ref": "#/$defs/bool" }, "lock": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Lock" + "description": "The deployment lock attributes.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Lock", + "markdownDescription": "The deployment lock attributes. See [lock](https://docs.databricks.com/dev-tools/bundles/reference.html#lock)." } }, "additionalProperties": false @@ -1048,20 +1092,28 @@ ] }, "config.Experimental": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "pydabs": { + "description": "The PyDABs configuration.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config.PyDABs" }, + "python": { + "description": "Configures loading of Python code defined with 'databricks-bundles' package.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Python" + }, "python_wheel_wrapper": { + "description": "Whether to use a Python wheel wrapper", "$ref": "#/$defs/bool" }, "scripts": { + "description": "The commands to run", "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config.Command" }, "use_legacy_run_as": { + "description": "Whether to use the legacy run_as behavior", "$ref": "#/$defs/bool" } }, @@ -1074,15 +1126,19 @@ ] }, "config.Git": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "branch": { - "$ref": "#/$defs/string" + "description": "The Git branch name.", + "$ref": "#/$defs/string", + "markdownDescription": "The Git branch name. See [git](https://docs.databricks.com/dev-tools/bundles/settings.html#git)." }, "origin_url": { - "$ref": "#/$defs/string" + "description": "The origin URL of the repository.", + "$ref": "#/$defs/string", + "markdownDescription": "The origin URL of the repository. 
See [git](https://docs.databricks.com/dev-tools/bundles/settings.html#git)." } }, "additionalProperties": false @@ -1094,14 +1150,16 @@ ] }, "config.Lock": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "enabled": { + "description": "Whether this lock is enabled.", "$ref": "#/$defs/bool" }, "force": { + "description": "Whether to force this lock if it is enabled.", "$ref": "#/$defs/bool" } }, @@ -1117,26 +1175,32 @@ "type": "string" }, "config.Presets": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "jobs_max_concurrent_runs": { + "description": "The maximum concurrent runs for a job.", "$ref": "#/$defs/int" }, "name_prefix": { + "description": "The prefix for job runs of the bundle.", "$ref": "#/$defs/string" }, "pipelines_development": { + "description": "Whether pipeline deployments should be locked in development mode.", "$ref": "#/$defs/bool" }, "source_linked_deployment": { + "description": "Whether to link the deployment to the bundle source.", "$ref": "#/$defs/bool" }, "tags": { + "description": "The tags for the bundle deployment.", "$ref": "#/$defs/map/string" }, "trigger_pause_status": { + "description": "A pause status to apply to all job triggers and schedules. Valid values are PAUSED or UNPAUSED.", "$ref": "#/$defs/string" } }, @@ -1149,17 +1213,20 @@ ] }, "config.PyDABs": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "enabled": { + "description": "Whether or not PyDABs (Private Preview) is enabled", "$ref": "#/$defs/bool" }, "import": { + "description": "The PyDABs project to import to discover resources, resource generator and mutators", "$ref": "#/$defs/slice/string" }, "venv_path": { + "description": "The Python virtual environment path", "$ref": "#/$defs/string" } }, @@ -1171,40 +1238,90 @@ } ] }, + "config.Python": { + "oneOf": [ + { + "type": "object", + "properties": { + "mutators": { + "description": "Mutators contains a list of fully qualified function paths to mutator functions.\n\nExample: [\"my_project.mutators:add_default_cluster\"]", + "$ref": "#/$defs/slice/string" + }, + "resources": { + "description": "Resources contains a list of fully qualified function paths to load resources\ndefined in Python code.\n\nExample: [\"my_project.resources:load_resources\"]", + "$ref": "#/$defs/slice/string" + }, + "venv_path": { + "description": "VEnvPath is path to the virtual environment.\n\nIf enabled, Python code will execute within this environment. If disabled,\nit defaults to using the Python interpreter available in the current shell.", + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false, + "required": [ + "resources", + "mutators" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "config.Resources": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "clusters": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Cluster" + "description": "The cluster definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Cluster", + "markdownDescription": "The cluster definitions for the bundle. 
See [cluster](https://docs.databricks.com/dev-tools/bundles/resources.html#cluster)" }, "dashboards": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Dashboard" + "description": "The dashboard definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Dashboard", + "markdownDescription": "The dashboard definitions for the bundle. See [dashboard](https://docs.databricks.com/dev-tools/bundles/resources.html#dashboard)" }, "experiments": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.MlflowExperiment" + "description": "The experiment definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.MlflowExperiment", + "markdownDescription": "The experiment definitions for the bundle. See [experiment](https://docs.databricks.com/dev-tools/bundles/resources.html#experiment)" }, "jobs": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Job" + "description": "The job definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Job", + "markdownDescription": "The job definitions for the bundle. See [job](https://docs.databricks.com/dev-tools/bundles/resources.html#job)" }, "model_serving_endpoints": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.ModelServingEndpoint" + "description": "The model serving endpoint definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.ModelServingEndpoint", + "markdownDescription": "The model serving endpoint definitions for the bundle. See [model_serving_endpoint](https://docs.databricks.com/dev-tools/bundles/resources.html#model_serving_endpoint)" }, "models": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.MlflowModel" + "description": "The model definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.MlflowModel", + "markdownDescription": "The model definitions for the bundle. See [model](https://docs.databricks.com/dev-tools/bundles/resources.html#model)" }, "pipelines": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Pipeline" + "description": "The pipeline definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Pipeline", + "markdownDescription": "The pipeline definitions for the bundle. See [pipeline](https://docs.databricks.com/dev-tools/bundles/resources.html#pipeline)" }, "quality_monitors": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.QualityMonitor" + "description": "The quality monitor definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.QualityMonitor", + "markdownDescription": "The quality monitor definitions for the bundle. See [quality_monitor](https://docs.databricks.com/dev-tools/bundles/resources.html#quality_monitor)" }, "registered_models": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.RegisteredModel" + "description": "The registered model definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.RegisteredModel", + "markdownDescription": "The registered model definitions for the bundle. 
See [registered_model](https://docs.databricks.com/dev-tools/bundles/resources.html#registered_model)" }, "schemas": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Schema" + "description": "The schema definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Schema", + "markdownDescription": "The schema definitions for the bundle. See [schema](https://docs.databricks.com/dev-tools/bundles/resources.html#schema)" }, "volumes": { "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Volume" @@ -1219,17 +1336,20 @@ ] }, "config.Sync": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "exclude": { + "description": "A list of files or folders to exclude from the bundle.", "$ref": "#/$defs/slice/string" }, "include": { + "description": "A list of files or folders to include in the bundle.", "$ref": "#/$defs/slice/string" }, "paths": { + "description": "The local folder paths, which can be outside the bundle root, to synchronize to the workspace when the bundle is deployed.", "$ref": "#/$defs/slice/string" } }, @@ -1242,51 +1362,75 @@ ] }, "config.Target": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "artifacts": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config.Artifact" + "description": "The artifacts to include in the target deployment.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config.Artifact", + "markdownDescription": "The artifacts to include in the target deployment. See [artifact](https://docs.databricks.com/dev-tools/bundles/reference.html#artifact)" }, "bundle": { + "description": "The name of the bundle when deploying to this target.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Bundle" }, "cluster_id": { + "description": "The ID of the cluster to use for this target.", "$ref": "#/$defs/string" }, "compute_id": { + "description": "Deprecated. The ID of the compute to use for this target.", "$ref": "#/$defs/string" }, "default": { + "description": "Whether this target is the default target.", "$ref": "#/$defs/bool" }, "git": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Git" + "description": "The Git version control settings for the target.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Git", + "markdownDescription": "The Git version control settings for the target. See [git](https://docs.databricks.com/dev-tools/bundles/reference.html#git)." }, "mode": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Mode" + "description": "The deployment mode for the target.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Mode", + "markdownDescription": "The deployment mode for the target. Valid values are `development` or `production`. See [link](https://docs.databricks.com/dev-tools/bundles/deployment-modes.html)." }, "permissions": { - "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission" + "description": "The permissions for deploying and running the bundle in the target.", + "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission", + "markdownDescription": "The permissions for deploying and running the bundle in the target. See [permission](https://docs.databricks.com/dev-tools/bundles/reference.html#permission)." 
}, "presets": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Presets" + "description": "The deployment presets for the target.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Presets", + "markdownDescription": "The deployment presets for the target. See [preset](https://docs.databricks.com/dev-tools/bundles/reference.html#preset)." }, "resources": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Resources" + "description": "The resource definitions for the target.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Resources", + "markdownDescription": "The resource definitions for the target. See [resources](https://docs.databricks.com/dev-tools/bundles/reference.html#resources)." }, "run_as": { - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.JobRunAs" + "description": "The identity to use to run the bundle.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.JobRunAs", + "markdownDescription": "The identity to use to run the bundle. See [job_run_as](https://docs.databricks.com/dev-tools/bundles/reference.html#job_run_as) and [link](https://docs.databricks.com/dev-tools/bundles/run_as.html)." }, "sync": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Sync" + "description": "The local paths to sync to the target workspace when a bundle is run or deployed.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Sync", + "markdownDescription": "The local paths to sync to the target workspace when a bundle is run or deployed. See [sync](https://docs.databricks.com/dev-tools/bundles/reference.html#sync)." }, "variables": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/variable.TargetVariable" + "description": "The custom variable definitions for the target.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/variable.TargetVariable", + "markdownDescription": "The custom variable definitions for the target. See [variables](https://docs.databricks.com/dev-tools/bundles/settings.html#variables) and [link](https://docs.databricks.com/dev-tools/bundles/variables.html)." }, "workspace": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Workspace" + "description": "The Databricks workspace for the target.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Workspace", + "markdownDescription": "The Databricks workspace for the target. 
[workspace](https://docs.databricks.com/dev-tools/bundles/reference.html#workspace)" } }, "additionalProperties": false @@ -1298,56 +1442,72 @@ ] }, "config.Workspace": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "artifact_path": { + "description": "The artifact path to use within the workspace for both deployments and workflow runs", "$ref": "#/$defs/string" }, "auth_type": { + "description": "The authentication type.", "$ref": "#/$defs/string" }, "azure_client_id": { + "description": "The Azure client ID", "$ref": "#/$defs/string" }, "azure_environment": { + "description": "The Azure environment", "$ref": "#/$defs/string" }, "azure_login_app_id": { + "description": "The Azure login app ID", "$ref": "#/$defs/string" }, "azure_tenant_id": { + "description": "The Azure tenant ID", "$ref": "#/$defs/string" }, "azure_use_msi": { + "description": "Whether to use MSI for Azure", "$ref": "#/$defs/bool" }, "azure_workspace_resource_id": { + "description": "The Azure workspace resource ID", "$ref": "#/$defs/string" }, "client_id": { + "description": "The client ID for the workspace", "$ref": "#/$defs/string" }, "file_path": { + "description": "The file path to use within the workspace for both deployments and workflow runs", "$ref": "#/$defs/string" }, "google_service_account": { + "description": "The Google service account name", "$ref": "#/$defs/string" }, "host": { + "description": "The Databricks workspace host URL", "$ref": "#/$defs/string" }, "profile": { + "description": "The Databricks workspace profile name", "$ref": "#/$defs/string" }, "resource_path": { + "description": "The workspace resource path", "$ref": "#/$defs/string" }, "root_path": { + "description": "The Databricks workspace root path", "$ref": "#/$defs/string" }, "state_path": { + "description": "The workspace state path", "$ref": "#/$defs/string" } }, @@ -1369,7 +1529,7 @@ "databricks-sdk-go": { "service": { "catalog.MonitorCronSchedule": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1407,7 +1567,7 @@ ] }, "catalog.MonitorDataClassificationConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1425,7 +1585,7 @@ ] }, "catalog.MonitorDestination": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1443,7 +1603,7 @@ ] }, "catalog.MonitorInferenceLog": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1500,7 +1660,7 @@ ] }, "catalog.MonitorMetric": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1550,7 +1710,7 @@ ] }, "catalog.MonitorNotifications": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1572,7 +1732,7 @@ ] }, "catalog.MonitorSnapshot": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": false @@ -1584,7 +1744,7 @@ ] }, "catalog.MonitorTimeSeries": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1617,7 +1777,7 @@ ] }, "compute.Adlsgen2Info": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1638,7 +1798,7 @@ ] }, "compute.AutoScale": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1660,7 +1820,7 @@ ] }, "compute.AwsAttributes": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1721,7 +1881,7 @@ ] }, "compute.AzureAttributes": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1759,7 +1919,7 @@ ] }, "compute.ClientsTypes": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1781,7 +1941,7 @@ ] }, "compute.ClusterLogConf": { - "anyOf": [ + "oneOf": [ { 
"type": "object", "properties": { @@ -1803,7 +1963,7 @@ ] }, "compute.ClusterSpec": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1873,6 +2033,13 @@ "description": "The optional ID of the instance pool to which the cluster belongs.", "$ref": "#/$defs/string" }, + "is_single_node": { + "description": "This field can only be used with `kind`.\n\nWhen set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`\n", + "$ref": "#/$defs/bool" + }, + "kind": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.Kind" + }, "node_type_id": { "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.\n", "$ref": "#/$defs/string" @@ -1908,6 +2075,10 @@ "description": "SSH public key contents that will be added to each Spark node in this cluster. The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.", "$ref": "#/$defs/slice/string" }, + "use_ml_runtime": { + "description": "This field can only be used with `kind`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.\n", + "$ref": "#/$defs/bool" + }, "workload_type": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.WorkloadType" } @@ -1922,8 +2093,11 @@ }, "compute.DataSecurityMode": { "type": "string", - "description": "Data security mode decides what data governance model to use when accessing data\nfrom a cluster.\n\n* `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode.\n* `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode.\n* `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. 
But programming languages and cluster features might be limited.\n\nThe following modes are deprecated starting with Databricks Runtime 15.0 and\nwill be removed for future Databricks Runtime versions:\n\n* `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters.\n* `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency clusters.\n* `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters.\n* `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled.\n", + "description": "Data security mode decides what data governance model to use when accessing data\nfrom a cluster.\n\nThe following modes can only be used with `kind`.\n* `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.\n* `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`.\n* `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.\n\nThe following modes can be used regardless of `kind`.\n* `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode.\n* `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode.\n* `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. But programming languages and cluster features might be limited.\n\nThe following modes are deprecated starting with Databricks Runtime 15.0 and\nwill be removed for future Databricks Runtime versions:\n\n* `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters.\n* `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency clusters.\n* `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters.\n* `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled.\n", "enum": [ + "DATA_SECURITY_MODE_AUTO", + "DATA_SECURITY_MODE_STANDARD", + "DATA_SECURITY_MODE_DEDICATED", "NONE", "SINGLE_USER", "USER_ISOLATION", @@ -1934,7 +2108,7 @@ ] }, "compute.DbfsStorageInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1955,7 +2129,7 @@ ] }, "compute.DockerBasicAuth": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1977,7 +2151,7 @@ ] }, "compute.DockerImage": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2006,7 +2180,7 @@ ] }, "compute.Environment": { - "anyOf": [ + "oneOf": [ { "type": "object", "description": "The environment entity used to preserve serverless environment side panel and jobs' environment for non-notebook task.\nIn this minimal environment spec, only pip dependencies are supported.", @@ -2032,7 +2206,7 @@ ] }, "compute.GcpAttributes": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2078,7 +2252,7 @@ ] }, "compute.GcsStorageInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2099,7 +2273,7 @@ ] }, "compute.InitScriptInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2140,8 +2314,11 @@ } ] }, + "compute.Kind": { + "type": "string" + }, "compute.Library": { - "anyOf": [ + 
"oneOf": [ { "type": "object", "properties": { @@ -2183,7 +2360,7 @@ ] }, "compute.LocalFileInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2204,7 +2381,7 @@ ] }, "compute.LogAnalyticsInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2226,7 +2403,7 @@ ] }, "compute.MavenLibrary": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2255,7 +2432,7 @@ ] }, "compute.PythonPyPiLibrary": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2280,7 +2457,7 @@ ] }, "compute.RCranLibrary": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2314,7 +2491,7 @@ ] }, "compute.S3StorageInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2359,7 +2536,7 @@ ] }, "compute.VolumesStorageInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2380,7 +2557,7 @@ ] }, "compute.WorkloadType": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2401,7 +2578,7 @@ ] }, "compute.WorkspaceStorageInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2428,6 +2605,40 @@ "TRASHED" ] }, + "jobs.CleanRoomsNotebookTask": { + "oneOf": [ + { + "type": "object", + "properties": { + "clean_room_name": { + "description": "The clean room that the notebook belongs to.", + "$ref": "#/$defs/string" + }, + "etag": { + "description": "Checksum to validate the freshness of the notebook resource (i.e. the notebook being run is the latest version).\nIt can be fetched by calling the :method:cleanroomassets/get API.", + "$ref": "#/$defs/string" + }, + "notebook_base_parameters": { + "description": "Base parameters to be used for the clean room notebook job.", + "$ref": "#/$defs/map/string" + }, + "notebook_name": { + "description": "Name of the notebook being run.", + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false, + "required": [ + "clean_room_name", + "notebook_name" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "jobs.Condition": { "type": "string", "enum": [ @@ -2436,7 +2647,7 @@ ] }, "jobs.ConditionTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2479,7 +2690,7 @@ ] }, "jobs.Continuous": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2497,7 +2708,7 @@ ] }, "jobs.CronSchedule": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2527,7 +2738,7 @@ ] }, "jobs.DbtTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2572,7 +2783,7 @@ ] }, "jobs.FileArrivalTriggerConfiguration": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2601,7 +2812,7 @@ ] }, "jobs.ForEachTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2651,7 +2862,7 @@ ] }, "jobs.GitSnapshot": { - "anyOf": [ + "oneOf": [ { "type": "object", "description": "Read-only state of the remote repository at the time the job was run. This field is only included on job runs.", @@ -2670,7 +2881,7 @@ ] }, "jobs.GitSource": { - "anyOf": [ + "oneOf": [ { "type": "object", "description": "An optional specification for a remote Git repository containing the source code used by tasks. Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks.\n\nIf `git_source` is set, these tasks retrieve the file from the remote repository by default. 
However, this behavior can be overridden by setting `source` to `WORKSPACE` on the task.\n\nNote: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are used, `git_source` must be defined on the job.", @@ -2709,7 +2920,7 @@ ] }, "jobs.JobCluster": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2735,7 +2946,7 @@ ] }, "jobs.JobDeployment": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2775,7 +2986,7 @@ ] }, "jobs.JobEmailNotifications": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2813,7 +3024,7 @@ ] }, "jobs.JobEnvironment": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2837,7 +3048,7 @@ ] }, "jobs.JobNotificationSettings": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2859,7 +3070,7 @@ ] }, "jobs.JobParameterDefinition": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2885,10 +3096,10 @@ ] }, "jobs.JobRunAs": { - "anyOf": [ + "oneOf": [ { "type": "object", - "description": "Write-only setting. Specifies the user, service principal or group that the job/pipeline runs as. If not specified, the job/pipeline runs as the user who created the job/pipeline.\n\nEither `user_name` or `service_principal_name` should be specified. If not, an error is thrown.", + "description": "Write-only setting. Specifies the user or service principal that the job runs as. If not specified, the job runs as the user who created the job.\n\nEither `user_name` or `service_principal_name` should be specified. If not, an error is thrown.", "properties": { "service_principal_name": { "description": "Application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role.", @@ -2908,7 +3119,7 @@ ] }, "jobs.JobSource": { - "anyOf": [ + "oneOf": [ { "type": "object", "description": "The source of the job specification in the remote repository when the job is source controlled.", @@ -2948,7 +3159,7 @@ }, "jobs.JobsHealthMetric": { "type": "string", - "description": "Specifies the health metric that is being evaluated for a particular health rule.\n\n* `RUN_DURATION_SECONDS`: Expected total time for a run in seconds.\n* `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric is in Private Preview.\n* `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all streams. This metric is in Private Preview.\n* `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This metric is in Private Preview.\n* `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all streams. This metric is in Private Preview.", + "description": "Specifies the health metric that is being evaluated for a particular health rule.\n\n* `RUN_DURATION_SECONDS`: Expected total time for a run in seconds.\n* `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all streams. 
This metric is in Public Preview.", "enum": [ "RUN_DURATION_SECONDS", "STREAMING_BACKLOG_BYTES", @@ -2965,7 +3176,7 @@ ] }, "jobs.JobsHealthRule": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2994,7 +3205,7 @@ ] }, "jobs.JobsHealthRules": { - "anyOf": [ + "oneOf": [ { "type": "object", "description": "An optional set of health rules that can be defined for this job.", @@ -3012,7 +3223,7 @@ ] }, "jobs.NotebookTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3052,7 +3263,7 @@ ] }, "jobs.PeriodicTriggerConfiguration": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3086,7 +3297,7 @@ ] }, "jobs.PipelineParams": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3104,7 +3315,7 @@ ] }, "jobs.PipelineTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3129,7 +3340,7 @@ ] }, "jobs.PythonWheelTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3163,7 +3374,7 @@ ] }, "jobs.QueueSettings": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3196,7 +3407,7 @@ ] }, "jobs.RunJobTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3260,7 +3471,7 @@ ] }, "jobs.SparkJarTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3286,7 +3497,7 @@ ] }, "jobs.SparkPythonTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3315,7 +3526,7 @@ ] }, "jobs.SparkSubmitTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3333,7 +3544,7 @@ ] }, "jobs.SqlTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3374,7 +3585,7 @@ ] }, "jobs.SqlTaskAlert": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3403,7 +3614,7 @@ ] }, "jobs.SqlTaskDashboard": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3436,7 +3647,7 @@ ] }, "jobs.SqlTaskFile": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3461,7 +3672,7 @@ ] }, "jobs.SqlTaskQuery": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3482,7 +3693,7 @@ ] }, "jobs.SqlTaskSubscription": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3504,7 +3715,7 @@ ] }, "jobs.TableUpdateTriggerConfiguration": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3534,10 +3745,14 @@ ] }, "jobs.Task": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { + "clean_rooms_notebook_task": { + "description": "The task runs a [clean rooms](https://docs.databricks.com/en/clean-rooms/index.html) notebook\nwhen the `clean_rooms_notebook_task` field is present.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.CleanRoomsNotebookTask" + }, "condition_task": { "description": "The task evaluates a condition that can be used to control the execution of other tasks when the `condition_task` field is present.\nThe condition task does not require a cluster to execute and does not support retries or notifications.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.ConditionTask" @@ -3666,7 +3881,7 @@ ] }, "jobs.TaskDependency": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3691,7 +3906,7 @@ ] }, "jobs.TaskEmailNotifications": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3729,7 +3944,7 @@ ] }, "jobs.TaskNotificationSettings": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3755,7 +3970,7 @@ ] }, "jobs.TriggerSettings": { - "anyOf": [ + "oneOf": 
[ { "type": "object", "properties": { @@ -3788,7 +4003,7 @@ ] }, "jobs.Webhook": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3808,7 +4023,7 @@ ] }, "jobs.WebhookNotifications": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3842,7 +4057,7 @@ ] }, "ml.ExperimentTag": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3864,7 +4079,7 @@ ] }, "ml.ModelTag": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3886,7 +4101,7 @@ ] }, "ml.ModelVersion": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3961,7 +4176,7 @@ ] }, "ml.ModelVersionTag": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3983,7 +4198,7 @@ ] }, "pipelines.CronTrigger": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4010,7 +4225,7 @@ ] }, "pipelines.FileLibrary": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4028,7 +4243,7 @@ ] }, "pipelines.Filters": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4050,7 +4265,7 @@ ] }, "pipelines.IngestionConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4076,7 +4291,7 @@ ] }, "pipelines.IngestionGatewayPipelineDefinition": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4110,7 +4325,7 @@ ] }, "pipelines.IngestionPipelineDefinition": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4140,7 +4355,7 @@ ] }, "pipelines.ManualTrigger": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": false @@ -4152,7 +4367,7 @@ ] }, "pipelines.NotebookLibrary": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4170,7 +4385,7 @@ ] }, "pipelines.Notifications": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4192,7 +4407,7 @@ ] }, "pipelines.PipelineCluster": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4282,7 +4497,7 @@ ] }, "pipelines.PipelineClusterAutoscale": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4320,7 +4535,7 @@ ] }, "pipelines.PipelineDeployment": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4342,7 +4557,7 @@ ] }, "pipelines.PipelineLibrary": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4376,7 +4591,7 @@ ] }, "pipelines.PipelineTrigger": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4396,7 +4611,7 @@ ] }, "pipelines.ReportSpec": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4430,22 +4645,13 @@ ] }, "pipelines.RestartWindow": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "days_of_week": { "description": "Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour).\nIf not specified all days of the week will be used.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindowDaysOfWeek", - "enum": [ - "MONDAY", - "TUESDAY", - "WEDNESDAY", - "THURSDAY", - "FRIDAY", - "SATURDAY", - "SUNDAY" - ] + "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindowDaysOfWeek" }, "start_hour": { "description": "An integer between 0 and 23 denoting the start hour for the restart window in the 24-hour day.\nContinuous pipeline restart is triggered only within a five-hour window starting at this hour.", @@ -4468,10 +4674,20 @@ ] }, "pipelines.RestartWindowDaysOfWeek": { - "type": "string" + "type": "string", + "description": "Days of week in which the 
restart is allowed to happen (within a five-hour window starting at start_hour).\nIf not specified all days of the week will be used.", + "enum": [ + "MONDAY", + "TUESDAY", + "WEDNESDAY", + "THURSDAY", + "FRIDAY", + "SATURDAY", + "SUNDAY" + ] }, "pipelines.SchemaSpec": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4505,7 +4721,7 @@ ] }, "pipelines.TableSpec": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4547,7 +4763,7 @@ ] }, "pipelines.TableSpecificConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4585,14 +4801,16 @@ ] }, "serving.Ai21LabsConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "ai21labs_api_key": { + "description": "The Databricks secret key reference for an AI21 Labs API key. If you prefer to paste your API key directly, see `ai21labs_api_key_plaintext`. You must provide an API key using one of the following fields: `ai21labs_api_key` or `ai21labs_api_key_plaintext`.", "$ref": "#/$defs/string" }, "ai21labs_api_key_plaintext": { + "description": "An AI21 Labs API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `ai21labs_api_key`. You must provide an API key using one of the following fields: `ai21labs_api_key` or `ai21labs_api_key_plaintext`.", "$ref": "#/$defs/string" } }, @@ -4605,7 +4823,7 @@ ] }, "serving.AiGatewayConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4635,7 +4853,7 @@ ] }, "serving.AiGatewayGuardrailParameters": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4665,7 +4883,7 @@ ] }, "serving.AiGatewayGuardrailPiiBehavior": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4694,7 +4912,7 @@ ] }, "serving.AiGatewayGuardrails": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4716,7 +4934,7 @@ ] }, "serving.AiGatewayInferenceTableConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4746,7 +4964,7 @@ ] }, "serving.AiGatewayRateLimit": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4791,7 +5009,7 @@ ] }, "serving.AiGatewayUsageTrackingConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4809,7 +5027,7 @@ ] }, "serving.AmazonBedrockConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4861,7 +5079,7 @@ ] }, "serving.AnthropicConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4883,7 +5101,7 @@ ] }, "serving.AutoCaptureConfigInput": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4913,7 +5131,7 @@ ] }, "serving.CohereConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4939,7 +5157,7 @@ ] }, "serving.DatabricksModelServingConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4968,7 +5186,7 @@ ] }, "serving.EndpointCoreConfigInput": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4998,7 +5216,7 @@ ] }, "serving.EndpointTag": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5023,7 +5241,7 @@ ] }, "serving.ExternalModel": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5100,20 +5318,24 @@ ] }, "serving.GoogleCloudVertexAiConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "private_key": { + "description": "The Databricks secret key reference for a private key for the service account which has access to the Google Cloud Vertex AI Service. 
See [Best practices for managing service account keys](https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys). If you prefer to paste your API key directly, see `private_key_plaintext`. You must provide an API key using one of the following fields: `private_key` or `private_key_plaintext`", "$ref": "#/$defs/string" }, "private_key_plaintext": { + "description": "The private key for the service account which has access to the Google Cloud Vertex AI Service provided as a plaintext secret. See [Best practices for managing service account keys](https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys). If you prefer to reference your key using Databricks Secrets, see `private_key`. You must provide an API key using one of the following fields: `private_key` or `private_key_plaintext`.", "$ref": "#/$defs/string" }, "project_id": { + "description": "This is the Google Cloud project id that the service account is associated with.", "$ref": "#/$defs/string" }, "region": { + "description": "This is the region for the Google Cloud Vertex AI Service. See [supported regions](https://cloud.google.com/vertex-ai/docs/general/locations) for more details. Some models are only available in specific regions.", "$ref": "#/$defs/string" } }, @@ -5126,41 +5348,52 @@ ] }, "serving.OpenAiConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "microsoft_entra_client_id": { + "description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Client ID.\n", "$ref": "#/$defs/string" }, "microsoft_entra_client_secret": { + "description": "The Databricks secret key reference for a client secret used for Microsoft Entra ID authentication.\nIf you prefer to paste your client secret directly, see `microsoft_entra_client_secret_plaintext`.\nYou must provide an API key using one of the following fields: `microsoft_entra_client_secret` or `microsoft_entra_client_secret_plaintext`.\n", "$ref": "#/$defs/string" }, "microsoft_entra_client_secret_plaintext": { + "description": "The client secret used for Microsoft Entra ID authentication provided as a plaintext string.\nIf you prefer to reference your key using Databricks Secrets, see `microsoft_entra_client_secret`.\nYou must provide an API key using one of the following fields: `microsoft_entra_client_secret` or `microsoft_entra_client_secret_plaintext`.\n", "$ref": "#/$defs/string" }, "microsoft_entra_tenant_id": { + "description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Tenant ID.\n", "$ref": "#/$defs/string" }, "openai_api_base": { + "description": "This is a field to provide a customized base URl for the OpenAI API.\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\nFor other OpenAI API types, this field is optional, and if left unspecified, the standard OpenAI base URL is used.\n", "$ref": "#/$defs/string" }, "openai_api_key": { + "description": "The Databricks secret key reference for an OpenAI API key using the OpenAI or Azure service. If you prefer to paste your API key directly, see `openai_api_key_plaintext`. You must provide an API key using one of the following fields: `openai_api_key` or `openai_api_key_plaintext`.", "$ref": "#/$defs/string" }, "openai_api_key_plaintext": { + "description": "The OpenAI API key using the OpenAI or Azure service provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `openai_api_key`. 
You must provide an API key using one of the following fields: `openai_api_key` or `openai_api_key_plaintext`.", "$ref": "#/$defs/string" }, "openai_api_type": { + "description": "This is an optional field to specify the type of OpenAI API to use.\nFor Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security\naccess validation protocol. For access token validation, use azure. For authentication using Azure Active\nDirectory (Azure AD) use, azuread.\n", "$ref": "#/$defs/string" }, "openai_api_version": { + "description": "This is an optional field to specify the OpenAI API version.\nFor Azure OpenAI, this field is required, and is the version of the Azure OpenAI service to\nutilize, specified by a date.\n", "$ref": "#/$defs/string" }, "openai_deployment_name": { + "description": "This field is only required for Azure OpenAI and is the name of the deployment resource for the\nAzure OpenAI service.\n", "$ref": "#/$defs/string" }, "openai_organization": { + "description": "This is an optional field to specify the organization in OpenAI or Azure OpenAI.\n", "$ref": "#/$defs/string" } }, @@ -5173,14 +5406,16 @@ ] }, "serving.PaLmConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "palm_api_key": { + "description": "The Databricks secret key reference for a PaLM API key. If you prefer to paste your API key directly, see `palm_api_key_plaintext`. You must provide an API key using one of the following fields: `palm_api_key` or `palm_api_key_plaintext`.", "$ref": "#/$defs/string" }, "palm_api_key_plaintext": { + "description": "The PaLM API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `palm_api_key`. You must provide an API key using one of the following fields: `palm_api_key` or `palm_api_key_plaintext`.", "$ref": "#/$defs/string" } }, @@ -5193,7 +5428,7 @@ ] }, "serving.RateLimit": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5238,7 +5473,7 @@ ] }, "serving.Route": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5264,7 +5499,7 @@ ] }, "serving.ServedEntityInput": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5322,7 +5557,7 @@ ] }, "serving.ServedModelInput": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5401,7 +5636,7 @@ ] }, "serving.TrafficConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5423,7 +5658,7 @@ } }, "int": { - "anyOf": [ + "oneOf": [ { "type": "integer" }, @@ -5450,7 +5685,7 @@ ] }, "int64": { - "anyOf": [ + "oneOf": [ { "type": "integer" }, @@ -5484,7 +5719,7 @@ "bundle": { "config": { "resources.Cluster": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5498,7 +5733,7 @@ ] }, "resources.Dashboard": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5512,7 +5747,7 @@ ] }, "resources.Job": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5526,7 +5761,7 @@ ] }, "resources.MlflowExperiment": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5540,7 +5775,7 @@ ] }, "resources.MlflowModel": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5554,7 +5789,7 @@ ] }, "resources.ModelServingEndpoint": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5568,7 +5803,7 @@ ] }, "resources.Pipeline": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5582,7 +5817,7 @@ ] }, 
"resources.QualityMonitor": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5596,7 +5831,7 @@ ] }, "resources.RegisteredModel": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5610,7 +5845,7 @@ ] }, "resources.Schema": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5624,7 +5859,7 @@ ] }, "resources.Volume": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5638,7 +5873,7 @@ ] }, "variable.TargetVariable": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5652,7 +5887,7 @@ ] }, "variable.Variable": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5667,7 +5902,7 @@ } }, "config.Artifact": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5681,7 +5916,7 @@ ] }, "config.Command": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5695,7 +5930,7 @@ ] }, "config.Target": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5713,7 +5948,7 @@ } }, "string": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5734,7 +5969,7 @@ "bundle": { "config": { "resources.Grant": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5748,7 +5983,7 @@ ] }, "resources.Permission": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5763,7 +5998,7 @@ } }, "config.ArtifactFile": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5781,7 +6016,7 @@ "databricks-sdk-go": { "service": { "catalog.MonitorMetric": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5795,7 +6030,7 @@ ] }, "compute.InitScriptInfo": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5809,7 +6044,7 @@ ] }, "compute.Library": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5823,7 +6058,7 @@ ] }, "jobs.JobCluster": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5837,7 +6072,7 @@ ] }, "jobs.JobEnvironment": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5851,7 +6086,7 @@ ] }, "jobs.JobParameterDefinition": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5865,7 +6100,7 @@ ] }, "jobs.JobsHealthRule": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5879,7 +6114,7 @@ ] }, "jobs.SqlTaskSubscription": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5893,7 +6128,7 @@ ] }, "jobs.Task": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5907,7 +6142,7 @@ ] }, "jobs.TaskDependency": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5921,7 +6156,7 @@ ] }, "jobs.Webhook": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5935,7 +6170,7 @@ ] }, "ml.ExperimentTag": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5949,7 +6184,7 @@ ] }, "ml.ModelTag": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5963,7 +6198,7 @@ ] }, "ml.ModelVersion": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5977,7 +6212,7 @@ ] }, "ml.ModelVersionTag": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5991,7 +6226,7 @@ ] }, "pipelines.IngestionConfig": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6005,7 +6240,7 @@ ] }, "pipelines.Notifications": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6019,7 +6254,7 @@ ] }, "pipelines.PipelineCluster": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6033,7 +6268,7 @@ ] }, 
"pipelines.PipelineLibrary": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6046,8 +6281,22 @@ } ] }, + "pipelines.RestartWindowDaysOfWeek": { + "oneOf": [ + { + "type": "array", + "items": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindowDaysOfWeek" + } + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "serving.AiGatewayRateLimit": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6061,7 +6310,7 @@ ] }, "serving.EndpointTag": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6075,7 +6324,7 @@ ] }, "serving.RateLimit": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6089,7 +6338,7 @@ ] }, "serving.Route": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6103,7 +6352,7 @@ ] }, "serving.ServedEntityInput": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6117,7 +6366,7 @@ ] }, "serving.ServedModelInput": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6135,7 +6384,7 @@ } }, "string": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6156,39 +6405,57 @@ "type": "object", "properties": { "artifacts": { + "description": "Defines the attributes to build an artifact", "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config.Artifact" }, "bundle": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Bundle" + "description": "The attributes of the bundle.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Bundle", + "markdownDescription": "The attributes of the bundle. See [bundle](https://docs.databricks.com/dev-tools/bundles/settings.html#bundle)" }, "experimental": { + "description": "Defines attributes for experimental features.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Experimental" }, "include": { - "$ref": "#/$defs/slice/string" + "description": "Specifies a list of path globs that contain configuration files to include within the bundle.", + "$ref": "#/$defs/slice/string", + "markdownDescription": "Specifies a list of path globs that contain configuration files to include within the bundle. See [include](https://docs.databricks.com/dev-tools/bundles/settings.html#include)" }, "permissions": { - "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission" + "description": "Defines the permissions to apply to experiments, jobs, pipelines, and models defined in the bundle", + "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission", + "markdownDescription": "Defines the permissions to apply to experiments, jobs, pipelines, and models defined in the bundle. See [permissions](https://docs.databricks.com/dev-tools/bundles/settings.html#permissions) and [link](https://docs.databricks.com/dev-tools/bundles/permissions.html)." }, "presets": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Presets" + "description": "Defines bundle deployment presets.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Presets", + "markdownDescription": "Defines bundle deployment presets. See [presets](https://docs.databricks.com/dev-tools/bundles/deployment-modes.html#presets)." 
}, "resources": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Resources" + "description": "Specifies information about the Databricks resources used by the bundle", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Resources", + "markdownDescription": "Specifies information about the Databricks resources used by the bundle. See [link](https://docs.databricks.com/dev-tools/bundles/resources.html)." }, "run_as": { + "description": "The identity to use to run the bundle.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.JobRunAs" }, "sync": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Sync" + "description": "The files and file paths to include or exclude in the bundle.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Sync", + "markdownDescription": "The files and file paths to include or exclude in the bundle. See [link](https://docs.databricks.com/dev-tools/bundles/)" }, "targets": { + "description": "Defines deployment targets for the bundle.", "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config.Target" }, "variables": { + "description": "A Map that defines the custom variables for the bundle, where each key is the name of the variable, and the value is a Map that defines the variable.", "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/variable.Variable" }, "workspace": { + "description": "Defines the Databricks workspace for the bundle.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Workspace" } }, diff --git a/bundle/tests/clusters_test.go b/bundle/tests/clusters_test.go deleted file mode 100644 index def8a2a31..000000000 --- a/bundle/tests/clusters_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package config_tests - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestClusters(t *testing.T) { - b := load(t, "./clusters") - assert.Equal(t, "clusters", b.Config.Bundle.Name) - - cluster := b.Config.Resources.Clusters["foo"] - assert.Equal(t, "foo", cluster.ClusterName) - assert.Equal(t, "13.3.x-scala2.12", cluster.SparkVersion) - assert.Equal(t, "i3.xlarge", cluster.NodeTypeId) - assert.Equal(t, 2, cluster.NumWorkers) - assert.Equal(t, "2g", cluster.SparkConf["spark.executor.memory"]) - assert.Equal(t, 2, cluster.Autoscale.MinWorkers) - assert.Equal(t, 7, cluster.Autoscale.MaxWorkers) -} - -func TestClustersOverride(t *testing.T) { - b := loadTarget(t, "./clusters", "development") - assert.Equal(t, "clusters", b.Config.Bundle.Name) - - cluster := b.Config.Resources.Clusters["foo"] - assert.Equal(t, "foo-override", cluster.ClusterName) - assert.Equal(t, "15.2.x-scala2.12", cluster.SparkVersion) - assert.Equal(t, "m5.xlarge", cluster.NodeTypeId) - assert.Equal(t, 3, cluster.NumWorkers) - assert.Equal(t, "4g", cluster.SparkConf["spark.executor.memory"]) - assert.Equal(t, "4g", cluster.SparkConf["spark.executor.memory2"]) - assert.Equal(t, 1, cluster.Autoscale.MinWorkers) - assert.Equal(t, 3, cluster.Autoscale.MaxWorkers) -} diff --git a/bundle/tests/complex_variables_test.go b/bundle/tests/complex_variables_test.go deleted file mode 100644 index e68823c33..000000000 --- a/bundle/tests/complex_variables_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package config_tests - -import ( - "context" - "testing" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config/mutator" - "github.com/databricks/databricks-sdk-go/service/compute" - "github.com/stretchr/testify/require" -) - -func TestComplexVariables(t *testing.T) { - b, diags := 
loadTargetWithDiags("variables/complex", "default") - require.Empty(t, diags) - - diags = bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SetVariables(), - mutator.ResolveVariableReferencesInComplexVariables(), - mutator.ResolveVariableReferences( - "variables", - ), - )) - require.NoError(t, diags.Error()) - - require.Equal(t, "13.2.x-scala2.11", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkVersion) - require.Equal(t, "Standard_DS3_v2", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NodeTypeId) - require.Equal(t, "some-policy-id", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.PolicyId) - require.Equal(t, 2, b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NumWorkers) - require.Equal(t, "true", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.speculation"]) - require.Equal(t, "true", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.random"]) - - require.Equal(t, 3, len(b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries)) - require.Contains(t, b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries, compute.Library{ - Jar: "/path/to/jar", - }) - require.Contains(t, b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries, compute.Library{ - Egg: "/path/to/egg", - }) - require.Contains(t, b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries, compute.Library{ - Whl: "/path/to/whl", - }) - - require.Equal(t, "task with spark version 13.2.x-scala2.11 and jar /path/to/jar", b.Config.Resources.Jobs["my_job"].Tasks[0].TaskKey) -} - -func TestComplexVariablesOverride(t *testing.T) { - b, diags := loadTargetWithDiags("variables/complex", "dev") - require.Empty(t, diags) - - diags = bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SetVariables(), - mutator.ResolveVariableReferencesInComplexVariables(), - mutator.ResolveVariableReferences( - "variables", - ), - )) - require.NoError(t, diags.Error()) - - require.Equal(t, "14.2.x-scala2.11", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkVersion) - require.Equal(t, "Standard_DS3_v3", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NodeTypeId) - require.Equal(t, 4, b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NumWorkers) - require.Equal(t, "false", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.speculation"]) - - // Making sure the variable is overriden and not merged / extended - // These properties are set in the default target but not set in override target - // So they should be empty - require.Equal(t, "", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.random"]) - require.Equal(t, "", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.PolicyId) -} - -func TestComplexVariablesOverrideWithMultipleFiles(t *testing.T) { - b, diags := loadTargetWithDiags("variables/complex_multiple_files", "dev") - require.Empty(t, diags) - - diags = bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SetVariables(), - mutator.ResolveVariableReferencesInComplexVariables(), - mutator.ResolveVariableReferences( - "variables", - ), - )) - require.NoError(t, diags.Error()) - for _, cluster := range b.Config.Resources.Jobs["my_job"].JobClusters { - require.Equalf(t, "14.2.x-scala2.11", cluster.NewCluster.SparkVersion, "cluster: %v", cluster.JobClusterKey) - require.Equalf(t, "Standard_DS3_v2", cluster.NewCluster.NodeTypeId, "cluster: %v", cluster.JobClusterKey) - require.Equalf(t, 4, cluster.NewCluster.NumWorkers, "cluster: 
%v", cluster.JobClusterKey) - require.Equalf(t, "false", cluster.NewCluster.SparkConf["spark.speculation"], "cluster: %v", cluster.JobClusterKey) - } -} - -func TestComplexVariablesOverrideWithFullSyntax(t *testing.T) { - b, diags := loadTargetWithDiags("variables/complex", "dev") - require.Empty(t, diags) - - diags = bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SetVariables(), - mutator.ResolveVariableReferencesInComplexVariables(), - mutator.ResolveVariableReferences( - "variables", - ), - )) - require.NoError(t, diags.Error()) - require.Empty(t, diags) - - complexvar := b.Config.Variables["complexvar"].Value - require.Equal(t, map[string]any{"key1": "1", "key2": "2", "key3": "3"}, complexvar) -} diff --git a/bundle/tests/environment_git_test.go b/bundle/tests/environment_git_test.go index d4695c78d..848b972b1 100644 --- a/bundle/tests/environment_git_test.go +++ b/bundle/tests/environment_git_test.go @@ -2,7 +2,6 @@ package config_tests import ( "context" - "fmt" "strings" "testing" @@ -16,7 +15,7 @@ func TestGitAutoLoadWithEnvironment(t *testing.T) { bundle.Apply(context.Background(), b, mutator.LoadGitDetails()) assert.True(t, b.Config.Bundle.Git.Inferred) validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks") - assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL)) + assert.True(t, validUrl, "Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL) } func TestGitManuallySetBranchWithEnvironment(t *testing.T) { @@ -25,5 +24,5 @@ func TestGitManuallySetBranchWithEnvironment(t *testing.T) { assert.False(t, b.Config.Bundle.Git.Inferred) assert.Equal(t, "main", b.Config.Bundle.Git.Branch) validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks") - assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL)) + assert.True(t, validUrl, "Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL) } diff --git a/bundle/tests/environment_overrides_test.go b/bundle/tests/environment_overrides_test.go index 4a1115048..b68b083ff 100644 --- a/bundle/tests/environment_overrides_test.go +++ b/bundle/tests/environment_overrides_test.go @@ -21,8 +21,8 @@ func TestEnvironmentOverridesResourcesDev(t *testing.T) { assert.Equal(t, "base job", b.Config.Resources.Jobs["job1"].Name) // Base values are preserved in the development environment. - assert.Equal(t, true, b.Config.Resources.Pipelines["boolean1"].Photon) - assert.Equal(t, false, b.Config.Resources.Pipelines["boolean2"].Photon) + assert.True(t, b.Config.Resources.Pipelines["boolean1"].Photon) + assert.False(t, b.Config.Resources.Pipelines["boolean2"].Photon) } func TestEnvironmentOverridesResourcesStaging(t *testing.T) { @@ -30,6 +30,6 @@ func TestEnvironmentOverridesResourcesStaging(t *testing.T) { assert.Equal(t, "staging job", b.Config.Resources.Jobs["job1"].Name) // Override values are applied in the staging environment. 
- assert.Equal(t, false, b.Config.Resources.Pipelines["boolean1"].Photon) - assert.Equal(t, true, b.Config.Resources.Pipelines["boolean2"].Photon) + assert.False(t, b.Config.Resources.Pipelines["boolean1"].Photon) + assert.True(t, b.Config.Resources.Pipelines["boolean2"].Photon) } diff --git a/bundle/tests/environments_job_and_pipeline_test.go b/bundle/tests/environments_job_and_pipeline_test.go index 218d2e470..423b14c07 100644 --- a/bundle/tests/environments_job_and_pipeline_test.go +++ b/bundle/tests/environments_job_and_pipeline_test.go @@ -10,11 +10,11 @@ import ( func TestJobAndPipelineDevelopmentWithEnvironment(t *testing.T) { b := loadTarget(t, "./environments_job_and_pipeline", "development") - assert.Len(t, b.Config.Resources.Jobs, 0) + assert.Empty(t, b.Config.Resources.Jobs) assert.Len(t, b.Config.Resources.Pipelines, 1) p := b.Config.Resources.Pipelines["nyc_taxi_pipeline"] - assert.Equal(t, b.Config.Bundle.Mode, config.Development) + assert.Equal(t, config.Development, b.Config.Bundle.Mode) assert.True(t, p.Development) require.Len(t, p.Libraries, 1) assert.Equal(t, "./dlt/nyc_taxi_loader", p.Libraries[0].Notebook.Path) @@ -23,7 +23,7 @@ func TestJobAndPipelineDevelopmentWithEnvironment(t *testing.T) { func TestJobAndPipelineStagingWithEnvironment(t *testing.T) { b := loadTarget(t, "./environments_job_and_pipeline", "staging") - assert.Len(t, b.Config.Resources.Jobs, 0) + assert.Empty(t, b.Config.Resources.Jobs) assert.Len(t, b.Config.Resources.Pipelines, 1) p := b.Config.Resources.Pipelines["nyc_taxi_pipeline"] diff --git a/bundle/tests/git_test.go b/bundle/tests/git_test.go index dec6c268a..41293e450 100644 --- a/bundle/tests/git_test.go +++ b/bundle/tests/git_test.go @@ -2,7 +2,6 @@ package config_tests import ( "context" - "fmt" "strings" "testing" @@ -17,7 +16,7 @@ func TestGitAutoLoad(t *testing.T) { bundle.Apply(context.Background(), b, mutator.LoadGitDetails()) assert.True(t, b.Config.Bundle.Git.Inferred) validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks") - assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL)) + assert.True(t, validUrl, "Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL) } func TestGitManuallySetBranch(t *testing.T) { @@ -26,7 +25,7 @@ func TestGitManuallySetBranch(t *testing.T) { assert.False(t, b.Config.Bundle.Git.Inferred) assert.Equal(t, "main", b.Config.Bundle.Git.Branch) validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks") - assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL)) + assert.True(t, validUrl, "Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL) } func TestGitBundleBranchValidation(t *testing.T) { diff --git a/bundle/tests/issue_1828_test.go b/bundle/tests/issue_1828_test.go index 5f2becce5..31fcfeb8e 100644 --- a/bundle/tests/issue_1828_test.go +++ b/bundle/tests/issue_1828_test.go @@ -35,7 +35,7 @@ func TestIssue1828(t *testing.T) { } if assert.Contains(t, b.Config.Variables, "float") { - assert.Equal(t, 3.14, b.Config.Variables["float"].Default) + assert.InDelta(t, 3.14, b.Config.Variables["float"].Default, 0.0001) } if assert.Contains(t, b.Config.Variables, "time") { @@ -43,6 +43,6 @@ func TestIssue1828(t *testing.T) { } if assert.Contains(t, b.Config.Variables, "nil") { - 
assert.Equal(t, nil, b.Config.Variables["nil"].Default) + assert.Nil(t, b.Config.Variables["nil"].Default) } } diff --git a/bundle/tests/job_and_pipeline_test.go b/bundle/tests/job_and_pipeline_test.go index 65aa5bdc4..408e3e3ef 100644 --- a/bundle/tests/job_and_pipeline_test.go +++ b/bundle/tests/job_and_pipeline_test.go @@ -10,11 +10,11 @@ import ( func TestJobAndPipelineDevelopment(t *testing.T) { b := loadTarget(t, "./job_and_pipeline", "development") - assert.Len(t, b.Config.Resources.Jobs, 0) + assert.Empty(t, b.Config.Resources.Jobs) assert.Len(t, b.Config.Resources.Pipelines, 1) p := b.Config.Resources.Pipelines["nyc_taxi_pipeline"] - assert.Equal(t, b.Config.Bundle.Mode, config.Development) + assert.Equal(t, config.Development, b.Config.Bundle.Mode) assert.True(t, p.Development) require.Len(t, p.Libraries, 1) assert.Equal(t, "./dlt/nyc_taxi_loader", p.Libraries[0].Notebook.Path) @@ -23,7 +23,7 @@ func TestJobAndPipelineDevelopment(t *testing.T) { func TestJobAndPipelineStaging(t *testing.T) { b := loadTarget(t, "./job_and_pipeline", "staging") - assert.Len(t, b.Config.Resources.Jobs, 0) + assert.Empty(t, b.Config.Resources.Jobs) assert.Len(t, b.Config.Resources.Pipelines, 1) p := b.Config.Resources.Pipelines["nyc_taxi_pipeline"] diff --git a/bundle/tests/job_cluster_key_test.go b/bundle/tests/job_cluster_key_test.go index 5a8b368e5..6a08da89c 100644 --- a/bundle/tests/job_cluster_key_test.go +++ b/bundle/tests/job_cluster_key_test.go @@ -16,13 +16,13 @@ func TestJobClusterKeyNotDefinedTest(t *testing.T) { diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), validate.JobClusterKeyDefined()) require.Len(t, diags, 1) require.NoError(t, diags.Error()) - require.Equal(t, diags[0].Severity, diag.Warning) - require.Equal(t, diags[0].Summary, "job_cluster_key key is not defined") + require.Equal(t, diag.Warning, diags[0].Severity) + require.Equal(t, "job_cluster_key key is not defined", diags[0].Summary) } func TestJobClusterKeyDefinedTest(t *testing.T) { b := loadTarget(t, "./job_cluster_key", "development") diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), validate.JobClusterKeyDefined()) - require.Len(t, diags, 0) + require.Empty(t, diags) } diff --git a/bundle/tests/loader.go b/bundle/tests/loader.go index 5c48d81cb..bb68b3059 100644 --- a/bundle/tests/loader.go +++ b/bundle/tests/loader.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/libs/dbr" "github.com/databricks/cli/libs/diag" "github.com/databricks/databricks-sdk-go/config" "github.com/databricks/databricks-sdk-go/experimental/mocks" @@ -66,7 +67,7 @@ func initializeTarget(t *testing.T, path, env string) (*bundle.Bundle, diag.Diag b := load(t, path) configureMock(t, b) - ctx := context.Background() + ctx := dbr.MockRuntime(context.Background(), false) diags := bundle.Apply(ctx, b, bundle.Seq( mutator.SelectTarget(env), phases.Initialize(), diff --git a/bundle/tests/model_serving_endpoint_test.go b/bundle/tests/model_serving_endpoint_test.go index b8b800863..f779a07e6 100644 --- a/bundle/tests/model_serving_endpoint_test.go +++ b/bundle/tests/model_serving_endpoint_test.go @@ -20,7 +20,7 @@ func assertExpected(t *testing.T, p *resources.ModelServingEndpoint) { func TestModelServingEndpointDevelopment(t *testing.T) { b := loadTarget(t, "./model_serving_endpoint", "development") assert.Len(t, b.Config.Resources.ModelServingEndpoints, 1) - assert.Equal(t, 
b.Config.Bundle.Mode, config.Development) + assert.Equal(t, config.Development, b.Config.Bundle.Mode) p := b.Config.Resources.ModelServingEndpoints["my_model_serving_endpoint"] assert.Equal(t, "my-dev-endpoint", p.Name) diff --git a/bundle/tests/override_job_cluster_test.go b/bundle/tests/override_job_cluster_test.go deleted file mode 100644 index 1393e03e5..000000000 --- a/bundle/tests/override_job_cluster_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package config_tests - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestOverrideJobClusterDev(t *testing.T) { - b := loadTarget(t, "./override_job_cluster", "development") - assert.Equal(t, "job", b.Config.Resources.Jobs["foo"].Name) - assert.Len(t, b.Config.Resources.Jobs["foo"].JobClusters, 1) - - c := b.Config.Resources.Jobs["foo"].JobClusters[0] - assert.Equal(t, "13.3.x-scala2.12", c.NewCluster.SparkVersion) - assert.Equal(t, "i3.xlarge", c.NewCluster.NodeTypeId) - assert.Equal(t, 1, c.NewCluster.NumWorkers) -} - -func TestOverrideJobClusterStaging(t *testing.T) { - b := loadTarget(t, "./override_job_cluster", "staging") - assert.Equal(t, "job", b.Config.Resources.Jobs["foo"].Name) - assert.Len(t, b.Config.Resources.Jobs["foo"].JobClusters, 1) - - c := b.Config.Resources.Jobs["foo"].JobClusters[0] - assert.Equal(t, "13.3.x-scala2.12", c.NewCluster.SparkVersion) - assert.Equal(t, "i3.2xlarge", c.NewCluster.NodeTypeId) - assert.Equal(t, 4, c.NewCluster.NumWorkers) -} diff --git a/bundle/tests/override_job_tasks_test.go b/bundle/tests/override_job_tasks_test.go deleted file mode 100644 index 82da04da2..000000000 --- a/bundle/tests/override_job_tasks_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package config_tests - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestOverrideTasksDev(t *testing.T) { - b := loadTarget(t, "./override_job_tasks", "development") - assert.Equal(t, "job", b.Config.Resources.Jobs["foo"].Name) - assert.Len(t, b.Config.Resources.Jobs["foo"].Tasks, 2) - - tasks := b.Config.Resources.Jobs["foo"].Tasks - assert.Equal(t, tasks[0].TaskKey, "key1") - assert.Equal(t, tasks[0].NewCluster.NodeTypeId, "i3.xlarge") - assert.Equal(t, tasks[0].NewCluster.NumWorkers, 1) - assert.Equal(t, tasks[0].SparkPythonTask.PythonFile, "./test1.py") - - assert.Equal(t, tasks[1].TaskKey, "key2") - assert.Equal(t, tasks[1].NewCluster.SparkVersion, "13.3.x-scala2.12") - assert.Equal(t, tasks[1].SparkPythonTask.PythonFile, "./test2.py") -} - -func TestOverrideTasksStaging(t *testing.T) { - b := loadTarget(t, "./override_job_tasks", "staging") - assert.Equal(t, "job", b.Config.Resources.Jobs["foo"].Name) - assert.Len(t, b.Config.Resources.Jobs["foo"].Tasks, 2) - - tasks := b.Config.Resources.Jobs["foo"].Tasks - assert.Equal(t, tasks[0].TaskKey, "key1") - assert.Equal(t, tasks[0].NewCluster.SparkVersion, "13.3.x-scala2.12") - assert.Equal(t, tasks[0].SparkPythonTask.PythonFile, "./test1.py") - - assert.Equal(t, tasks[1].TaskKey, "key2") - assert.Equal(t, tasks[1].NewCluster.NodeTypeId, "i3.2xlarge") - assert.Equal(t, tasks[1].NewCluster.NumWorkers, 4) - assert.Equal(t, tasks[1].SparkPythonTask.PythonFile, "./test3.py") -} diff --git a/bundle/tests/override_pipeline_cluster_test.go b/bundle/tests/override_pipeline_cluster_test.go deleted file mode 100644 index 591fe423d..000000000 --- a/bundle/tests/override_pipeline_cluster_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package config_tests - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestOverridePipelineClusterDev(t *testing.T) 
{ - b := loadTarget(t, "./override_pipeline_cluster", "development") - assert.Equal(t, "job", b.Config.Resources.Pipelines["foo"].Name) - assert.Len(t, b.Config.Resources.Pipelines["foo"].Clusters, 1) - - c := b.Config.Resources.Pipelines["foo"].Clusters[0] - assert.Equal(t, map[string]string{"foo": "bar"}, c.SparkConf) - assert.Equal(t, "i3.xlarge", c.NodeTypeId) - assert.Equal(t, 1, c.NumWorkers) -} - -func TestOverridePipelineClusterStaging(t *testing.T) { - b := loadTarget(t, "./override_pipeline_cluster", "staging") - assert.Equal(t, "job", b.Config.Resources.Pipelines["foo"].Name) - assert.Len(t, b.Config.Resources.Pipelines["foo"].Clusters, 1) - - c := b.Config.Resources.Pipelines["foo"].Clusters[0] - assert.Equal(t, map[string]string{"foo": "bar"}, c.SparkConf) - assert.Equal(t, "i3.2xlarge", c.NodeTypeId) - assert.Equal(t, 4, c.NumWorkers) -} diff --git a/bundle/tests/presets_test.go b/bundle/tests/presets_test.go index 5fcb5d95b..c2cbe497b 100644 --- a/bundle/tests/presets_test.go +++ b/bundle/tests/presets_test.go @@ -13,7 +13,7 @@ func TestPresetsDev(t *testing.T) { assert.Equal(t, "myprefix", b.Config.Presets.NamePrefix) assert.Equal(t, config.Paused, b.Config.Presets.TriggerPauseStatus) assert.Equal(t, 10, b.Config.Presets.JobsMaxConcurrentRuns) - assert.Equal(t, true, *b.Config.Presets.PipelinesDevelopment) + assert.True(t, *b.Config.Presets.PipelinesDevelopment) assert.Equal(t, "true", b.Config.Presets.Tags["dev"]) assert.Equal(t, "finance", b.Config.Presets.Tags["team"]) assert.Equal(t, "false", b.Config.Presets.Tags["prod"]) @@ -22,7 +22,7 @@ func TestPresetsDev(t *testing.T) { func TestPresetsProd(t *testing.T) { b := loadTarget(t, "./presets", "prod") - assert.Equal(t, false, *b.Config.Presets.PipelinesDevelopment) + assert.False(t, *b.Config.Presets.PipelinesDevelopment) assert.Equal(t, "finance", b.Config.Presets.Tags["team"]) assert.Equal(t, "true", b.Config.Presets.Tags["prod"]) } diff --git a/bundle/tests/python_wheel_test.go b/bundle/tests/python_wheel_test.go index c982c09d6..06cb05270 100644 --- a/bundle/tests/python_wheel_test.go +++ b/bundle/tests/python_wheel_test.go @@ -23,7 +23,7 @@ func TestPythonWheelBuild(t *testing.T) { matches, err := filepath.Glob("./python_wheel/python_wheel/my_test_code/dist/my_test_code-*.whl") require.NoError(t, err) - require.Equal(t, 1, len(matches)) + require.Len(t, matches, 1) match := libraries.ExpandGlobReferences() diags = bundle.Apply(ctx, b, match) @@ -39,7 +39,7 @@ func TestPythonWheelBuildAutoDetect(t *testing.T) { matches, err := filepath.Glob("./python_wheel/python_wheel_no_artifact/dist/my_test_code-*.whl") require.NoError(t, err) - require.Equal(t, 1, len(matches)) + require.Len(t, matches, 1) match := libraries.ExpandGlobReferences() diags = bundle.Apply(ctx, b, match) @@ -55,7 +55,7 @@ func TestPythonWheelBuildAutoDetectWithNotebookTask(t *testing.T) { matches, err := filepath.Glob("./python_wheel/python_wheel_no_artifact_notebook/dist/my_test_code-*.whl") require.NoError(t, err) - require.Equal(t, 1, len(matches)) + require.Len(t, matches, 1) match := libraries.ExpandGlobReferences() diags = bundle.Apply(ctx, b, match) @@ -108,7 +108,7 @@ func TestPythonWheelBuildWithEnvironmentKey(t *testing.T) { matches, err := filepath.Glob("./python_wheel/environment_key/my_test_code/dist/my_test_code-*.whl") require.NoError(t, err) - require.Equal(t, 1, len(matches)) + require.Len(t, matches, 1) match := libraries.ExpandGlobReferences() diags = bundle.Apply(ctx, b, match) @@ -124,7 +124,7 @@ func 
TestPythonWheelBuildMultiple(t *testing.T) { matches, err := filepath.Glob("./python_wheel/python_wheel_multiple/my_test_code/dist/my_test_code*.whl") require.NoError(t, err) - require.Equal(t, 2, len(matches)) + require.Len(t, matches, 2) match := libraries.ExpandGlobReferences() diags = bundle.Apply(ctx, b, match) diff --git a/bundle/tests/quality_monitor_test.go b/bundle/tests/quality_monitor_test.go index 9b91052f5..e95c7b7c1 100644 --- a/bundle/tests/quality_monitor_test.go +++ b/bundle/tests/quality_monitor_test.go @@ -19,7 +19,7 @@ func assertExpectedMonitor(t *testing.T, p *resources.QualityMonitor) { func TestMonitorTableNames(t *testing.T) { b := loadTarget(t, "./quality_monitor", "development") assert.Len(t, b.Config.Resources.QualityMonitors, 1) - assert.Equal(t, b.Config.Bundle.Mode, config.Development) + assert.Equal(t, config.Development, b.Config.Bundle.Mode) p := b.Config.Resources.QualityMonitors["my_monitor"] assert.Equal(t, "main.test.dev", p.TableName) diff --git a/bundle/tests/registered_model_test.go b/bundle/tests/registered_model_test.go index 008db8bdd..e9d572a3a 100644 --- a/bundle/tests/registered_model_test.go +++ b/bundle/tests/registered_model_test.go @@ -19,7 +19,7 @@ func assertExpectedModel(t *testing.T, p *resources.RegisteredModel) { func TestRegisteredModelDevelopment(t *testing.T) { b := loadTarget(t, "./registered_model", "development") assert.Len(t, b.Config.Resources.RegisteredModels, 1) - assert.Equal(t, b.Config.Bundle.Mode, config.Development) + assert.Equal(t, config.Development, b.Config.Bundle.Mode) p := b.Config.Resources.RegisteredModels["my_registered_model"] assert.Equal(t, "my-dev-model", p.Name) diff --git a/bundle/tests/run_as_test.go b/bundle/tests/run_as_test.go index 920577146..03ff51ec5 100644 --- a/bundle/tests/run_as_test.go +++ b/bundle/tests/run_as_test.go @@ -2,7 +2,6 @@ package config_tests import ( "context" - "fmt" "testing" "github.com/databricks/cli/bundle" @@ -93,7 +92,6 @@ func TestRunAsForAllowedWithTargetOverride(t *testing.T) { assert.Equal(t, ml.Model{Name: "skynet"}, *b.Config.Resources.Models["model_one"].Model) assert.Equal(t, catalog.CreateRegisteredModelRequest{Name: "skynet (in UC)"}, *b.Config.Resources.RegisteredModels["model_two"].CreateRegisteredModelRequest) assert.Equal(t, ml.Experiment{Name: "experiment_one"}, *b.Config.Resources.Experiments["experiment_one"].Experiment) - } func TestRunAsErrorForPipelines(t *testing.T) { @@ -220,8 +218,7 @@ func TestRunAsErrorNeitherUserOrSpSpecified(t *testing.T) { for _, tc := range tcases { t.Run(tc.name, func(t *testing.T) { - - bundlePath := fmt.Sprintf("./run_as/not_allowed/neither_sp_nor_user/%s", tc.name) + bundlePath := "./run_as/not_allowed/neither_sp_nor_user/" + tc.name b := load(t, bundlePath) ctx := context.Background() diff --git a/bundle/tests/suggest_target_test.go b/bundle/tests/suggest_target_test.go index 8fb130409..02905d779 100644 --- a/bundle/tests/suggest_target_test.go +++ b/bundle/tests/suggest_target_test.go @@ -1,22 +1,22 @@ package config_tests import ( - "path/filepath" + "context" "testing" - "github.com/databricks/cli/cmd/root" - assert "github.com/databricks/cli/libs/dyn/dynassert" - - "github.com/databricks/cli/internal" + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/stretchr/testify/require" ) func TestSuggestTargetIfWrongPassed(t *testing.T) { - t.Setenv("BUNDLE_ROOT", filepath.Join("target_overrides", "workspace")) - stdoutBytes, _, err := internal.RequireErrorRun(t, "bundle", 
"validate", "-e", "incorrect") - stdout := stdoutBytes.String() + b := load(t, "target_overrides/workspace") - assert.Error(t, root.ErrAlreadyPrinted, err) - assert.Contains(t, stdout, "Available targets:") - assert.Contains(t, stdout, "development") - assert.Contains(t, stdout, "staging") + ctx := context.Background() + diags := bundle.Apply(ctx, b, mutator.SelectTarget("incorrect")) + err := diags.Error() + require.Error(t, err) + require.Contains(t, err.Error(), "Available targets:") + require.Contains(t, err.Error(), "development") + require.Contains(t, err.Error(), "staging") } diff --git a/bundle/tests/sync_include_exclude_no_matches_test.go b/bundle/tests/sync_include_exclude_no_matches_test.go index 0192b61e6..c206e7471 100644 --- a/bundle/tests/sync_include_exclude_no_matches_test.go +++ b/bundle/tests/sync_include_exclude_no_matches_test.go @@ -20,26 +20,26 @@ func TestSyncIncludeExcludeNoMatchesTest(t *testing.T) { require.Len(t, diags, 3) require.NoError(t, diags.Error()) - require.Equal(t, diags[0].Severity, diag.Warning) - require.Equal(t, diags[0].Summary, "Pattern dist does not match any files") + require.Equal(t, diag.Warning, diags[0].Severity) + require.Equal(t, "Pattern dist does not match any files", diags[0].Summary) require.Len(t, diags[0].Paths, 1) - require.Equal(t, diags[0].Paths[0].String(), "sync.exclude[0]") + require.Equal(t, "sync.exclude[0]", diags[0].Paths[0].String()) assert.Len(t, diags[0].Locations, 1) require.Equal(t, diags[0].Locations[0].File, filepath.Join("sync", "override", "databricks.yml")) - require.Equal(t, diags[0].Locations[0].Line, 17) - require.Equal(t, diags[0].Locations[0].Column, 11) + require.Equal(t, 17, diags[0].Locations[0].Line) + require.Equal(t, 11, diags[0].Locations[0].Column) summaries := []string{ fmt.Sprintf("Pattern %s does not match any files", filepath.Join("src", "*")), fmt.Sprintf("Pattern %s does not match any files", filepath.Join("tests", "*")), } - require.Equal(t, diags[1].Severity, diag.Warning) + require.Equal(t, diag.Warning, diags[1].Severity) require.Contains(t, summaries, diags[1].Summary) - require.Equal(t, diags[2].Severity, diag.Warning) + require.Equal(t, diag.Warning, diags[2].Severity) require.Contains(t, summaries, diags[2].Summary) } @@ -47,7 +47,7 @@ func TestSyncIncludeWithNegate(t *testing.T) { b := loadTarget(t, "./sync/negate", "default") diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), validate.ValidateSyncPatterns()) - require.Len(t, diags, 0) + require.Empty(t, diags) require.NoError(t, diags.Error()) } @@ -58,6 +58,6 @@ func TestSyncIncludeWithNegateNoMatches(t *testing.T) { require.Len(t, diags, 1) require.NoError(t, diags.Error()) - require.Equal(t, diags[0].Severity, diag.Warning) - require.Equal(t, diags[0].Summary, "Pattern !*.txt2 does not match any files") + require.Equal(t, diag.Warning, diags[0].Severity) + require.Equal(t, "Pattern !*.txt2 does not match any files", diags[0].Summary) } diff --git a/bundle/tests/sync_test.go b/bundle/tests/sync_test.go index 15644b67e..f5a0296a9 100644 --- a/bundle/tests/sync_test.go +++ b/bundle/tests/sync_test.go @@ -115,12 +115,12 @@ func TestSyncPathsNoRoot(t *testing.T) { // If set to nil, it won't sync anything. b = loadTarget(t, "./sync/paths_no_root", "nil") assert.Equal(t, filepath.FromSlash("sync/paths_no_root"), b.SyncRootPath) - assert.Len(t, b.Config.Sync.Paths, 0) + assert.Empty(t, b.Config.Sync.Paths) // If set to an empty sequence, it won't sync anything. 
b = loadTarget(t, "./sync/paths_no_root", "empty") assert.Equal(t, filepath.FromSlash("sync/paths_no_root"), b.SyncRootPath) - assert.Len(t, b.Config.Sync.Paths, 0) + assert.Empty(t, b.Config.Sync.Paths) } func TestSyncSharedCode(t *testing.T) { diff --git a/bundle/tests/undefined_resources_test.go b/bundle/tests/undefined_resources_test.go deleted file mode 100644 index 3dbacbc25..000000000 --- a/bundle/tests/undefined_resources_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package config_tests - -import ( - "context" - "path/filepath" - "testing" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config/validate" - "github.com/databricks/cli/libs/diag" - "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" -) - -func TestUndefinedResourcesLoadWithError(t *testing.T) { - b := load(t, "./undefined_resources") - diags := bundle.Apply(context.Background(), b, validate.AllResourcesHaveValues()) - - assert.Len(t, diags, 3) - assert.Contains(t, diags, diag.Diagnostic{ - Severity: diag.Error, - Summary: "job undefined-job is not defined", - Locations: []dyn.Location{{ - File: filepath.FromSlash("undefined_resources/databricks.yml"), - Line: 6, - Column: 19, - }}, - Paths: []dyn.Path{dyn.MustPathFromString("resources.jobs.undefined-job")}, - }) - assert.Contains(t, diags, diag.Diagnostic{ - Severity: diag.Error, - Summary: "experiment undefined-experiment is not defined", - Locations: []dyn.Location{{ - File: filepath.FromSlash("undefined_resources/databricks.yml"), - Line: 11, - Column: 26, - }}, - Paths: []dyn.Path{dyn.MustPathFromString("resources.experiments.undefined-experiment")}, - }) - assert.Contains(t, diags, diag.Diagnostic{ - Severity: diag.Error, - Summary: "pipeline undefined-pipeline is not defined", - Locations: []dyn.Location{{ - File: filepath.FromSlash("undefined_resources/databricks.yml"), - Line: 14, - Column: 24, - }}, - Paths: []dyn.Path{dyn.MustPathFromString("resources.pipelines.undefined-pipeline")}, - }) -} diff --git a/bundle/tests/variables_test.go b/bundle/tests/variables_test.go deleted file mode 100644 index 9451c5a04..000000000 --- a/bundle/tests/variables_test.go +++ /dev/null @@ -1,206 +0,0 @@ -package config_tests - -import ( - "context" - "testing" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config/mutator" - "github.com/databricks/databricks-sdk-go/experimental/mocks" - "github.com/databricks/databricks-sdk-go/service/compute" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -func TestVariables(t *testing.T) { - t.Setenv("BUNDLE_VAR_b", "def") - b := load(t, "./variables/vanilla") - diags := bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SetVariables(), - mutator.ResolveVariableReferences( - "variables", - ), - )) - require.NoError(t, diags.Error()) - assert.Equal(t, "abc def", b.Config.Bundle.Name) -} - -func TestVariablesLoadingFailsWhenRequiredVariableIsNotSpecified(t *testing.T) { - b := load(t, "./variables/vanilla") - diags := bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SetVariables(), - mutator.ResolveVariableReferences( - "variables", - ), - )) - assert.ErrorContains(t, diags.Error(), "no value assigned to required variable b. 
Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_b environment variable") -} - -func TestVariablesTargetsBlockOverride(t *testing.T) { - b := load(t, "./variables/env_overrides") - diags := bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SelectTarget("env-with-single-variable-override"), - mutator.SetVariables(), - mutator.ResolveVariableReferences( - "variables", - ), - )) - require.NoError(t, diags.Error()) - assert.Equal(t, "default-a dev-b", b.Config.Workspace.Profile) -} - -func TestVariablesTargetsBlockOverrideForMultipleVariables(t *testing.T) { - b := load(t, "./variables/env_overrides") - diags := bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SelectTarget("env-with-two-variable-overrides"), - mutator.SetVariables(), - mutator.ResolveVariableReferences( - "variables", - ), - )) - require.NoError(t, diags.Error()) - assert.Equal(t, "prod-a prod-b", b.Config.Workspace.Profile) -} - -func TestVariablesTargetsBlockOverrideWithProcessEnvVars(t *testing.T) { - t.Setenv("BUNDLE_VAR_b", "env-var-b") - b := load(t, "./variables/env_overrides") - diags := bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SelectTarget("env-with-two-variable-overrides"), - mutator.SetVariables(), - mutator.ResolveVariableReferences( - "variables", - ), - )) - require.NoError(t, diags.Error()) - assert.Equal(t, "prod-a env-var-b", b.Config.Workspace.Profile) -} - -func TestVariablesTargetsBlockOverrideWithMissingVariables(t *testing.T) { - b := load(t, "./variables/env_overrides") - diags := bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SelectTarget("env-missing-a-required-variable-assignment"), - mutator.SetVariables(), - mutator.ResolveVariableReferences( - "variables", - ), - )) - assert.ErrorContains(t, diags.Error(), "no value assigned to required variable b. 
Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_b environment variable") -} - -func TestVariablesTargetsBlockOverrideWithUndefinedVariables(t *testing.T) { - b := load(t, "./variables/env_overrides") - diags := bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SelectTarget("env-using-an-undefined-variable"), - mutator.SetVariables(), - mutator.ResolveVariableReferences( - "variables", - ), - )) - assert.ErrorContains(t, diags.Error(), "variable c is not defined but is assigned a value") -} - -func TestVariablesWithoutDefinition(t *testing.T) { - t.Setenv("BUNDLE_VAR_a", "foo") - t.Setenv("BUNDLE_VAR_b", "bar") - b := load(t, "./variables/without_definition") - diags := bundle.Apply(context.Background(), b, mutator.SetVariables()) - require.NoError(t, diags.Error()) - require.True(t, b.Config.Variables["a"].HasValue()) - require.True(t, b.Config.Variables["b"].HasValue()) - assert.Equal(t, "foo", b.Config.Variables["a"].Value) - assert.Equal(t, "bar", b.Config.Variables["b"].Value) -} - -func TestVariablesWithTargetLookupOverrides(t *testing.T) { - b := load(t, "./variables/env_overrides") - - mockWorkspaceClient := mocks.NewMockWorkspaceClient(t) - b.SetWorkpaceClient(mockWorkspaceClient.WorkspaceClient) - instancePoolApi := mockWorkspaceClient.GetMockInstancePoolsAPI() - instancePoolApi.EXPECT().GetByInstancePoolName(mock.Anything, "some-test-instance-pool").Return(&compute.InstancePoolAndStats{ - InstancePoolId: "1234", - }, nil) - - clustersApi := mockWorkspaceClient.GetMockClustersAPI() - clustersApi.EXPECT().ListAll(mock.Anything, compute.ListClustersRequest{ - FilterBy: &compute.ListClustersFilterBy{ - ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi}, - }, - }).Return([]compute.ClusterDetails{ - {ClusterId: "4321", ClusterName: "some-test-cluster"}, - {ClusterId: "9876", ClusterName: "some-other-cluster"}, - }, nil) - - clusterPoliciesApi := mockWorkspaceClient.GetMockClusterPoliciesAPI() - clusterPoliciesApi.EXPECT().GetByName(mock.Anything, "some-test-cluster-policy").Return(&compute.Policy{ - PolicyId: "9876", - }, nil) - - diags := bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SelectTarget("env-overrides-lookup"), - mutator.SetVariables(), - mutator.ResolveResourceReferences(), - )) - - require.NoError(t, diags.Error()) - assert.Equal(t, "4321", b.Config.Variables["d"].Value) - assert.Equal(t, "1234", b.Config.Variables["e"].Value) - assert.Equal(t, "9876", b.Config.Variables["f"].Value) -} - -func TestVariableTargetOverrides(t *testing.T) { - var tcases = []struct { - targetName string - pipelineName string - pipelineContinuous bool - pipelineNumWorkers int - }{ - { - "use-default-variable-values", - "a_string", - true, - 42, - }, - { - "override-string-variable", - "overridden_string", - true, - 42, - }, - { - "override-int-variable", - "a_string", - true, - 43, - }, - { - "override-both-bool-and-string-variables", - "overridden_string", - false, - 42, - }, - } - - for _, tcase := range tcases { - t.Run(tcase.targetName, func(t *testing.T) { - b := loadTarget(t, "./variables/variable_overrides_in_target", tcase.targetName) - diags := bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SetVariables(), - mutator.ResolveVariableReferences("variables")), - ) - require.NoError(t, diags.Error()) - - assert.Equal(t, tcase.pipelineName, b.Config.Resources.Pipelines["my_pipeline"].Name) - assert.Equal(t, tcase.pipelineContinuous, 
b.Config.Resources.Pipelines["my_pipeline"].Continuous) - assert.Equal(t, tcase.pipelineNumWorkers, b.Config.Resources.Pipelines["my_pipeline"].Clusters[0].NumWorkers) - }) - } -} - -func TestBundleWithEmptyVariableLoads(t *testing.T) { - b := load(t, "./variables/empty") - diags := bundle.Apply(context.Background(), b, mutator.SetVariables()) - require.ErrorContains(t, diags.Error(), "no value assigned to required variable a") -} diff --git a/bundle/trampoline/python_dbr_warning.go b/bundle/trampoline/python_dbr_warning.go index cf3e9aeb3..0318df7c9 100644 --- a/bundle/trampoline/python_dbr_warning.go +++ b/bundle/trampoline/python_dbr_warning.go @@ -14,8 +14,7 @@ import ( "golang.org/x/mod/semver" ) -type wrapperWarning struct { -} +type wrapperWarning struct{} func WrapperWarning() bundle.Mutator { return &wrapperWarning{} @@ -62,7 +61,6 @@ func hasIncompatibleWheelTasks(ctx context.Context, b *bundle.Bundle) bool { if task.ExistingClusterId != "" { version, err := getSparkVersionForCluster(ctx, b.WorkspaceClient(), task.ExistingClusterId) - // If there's error getting spark version for cluster, do not mark it as incompatible if err != nil { log.Warnf(ctx, "unable to get spark version for cluster %s, err: %s", task.ExistingClusterId, err.Error()) diff --git a/bundle/trampoline/python_wheel.go b/bundle/trampoline/python_wheel.go index 8e309a625..075804479 100644 --- a/bundle/trampoline/python_wheel.go +++ b/bundle/trampoline/python_wheel.go @@ -2,6 +2,7 @@ package trampoline import ( "context" + "errors" "fmt" "strconv" "strings" @@ -147,7 +148,7 @@ func (t *pythonTrampoline) GetTemplateData(task *jobs.Task) (map[string]any, err func (t *pythonTrampoline) generateParameters(task *jobs.PythonWheelTask) (string, error) { if task.Parameters != nil && task.NamedParameters != nil { - return "", fmt.Errorf("not allowed to pass both paramaters and named_parameters") + return "", errors.New("not allowed to pass both paramaters and named_parameters") } params := append([]string{task.PackageName}, task.Parameters...) 
for k, v := range task.NamedParameters { diff --git a/bundle/trampoline/python_wheel_test.go b/bundle/trampoline/python_wheel_test.go index 517be35e4..d75a3eca3 100644 --- a/bundle/trampoline/python_wheel_test.go +++ b/bundle/trampoline/python_wheel_test.go @@ -127,7 +127,8 @@ func TestNoPanicWithNoPythonWheelTasks(t *testing.T) { Tasks: []jobs.Task{ { TaskKey: "notebook_task", - NotebookTask: &jobs.NotebookTask{}}, + NotebookTask: &jobs.NotebookTask{}, + }, }, }, }, diff --git a/bundle/trampoline/trampoline.go b/bundle/trampoline/trampoline.go index 1dc1c4463..600ce3d9c 100644 --- a/bundle/trampoline/trampoline.go +++ b/bundle/trampoline/trampoline.go @@ -62,7 +62,7 @@ func (m *trampoline) generateNotebookWrapper(ctx context.Context, b *bundle.Bund notebookName := fmt.Sprintf("notebook_%s_%s", task.JobKey, task.Task.TaskKey) localNotebookPath := filepath.Join(internalDir, notebookName+".py") - err = os.MkdirAll(filepath.Dir(localNotebookPath), 0755) + err = os.MkdirAll(filepath.Dir(localNotebookPath), 0o755) if err != nil { return err } diff --git a/bundle/trampoline/trampoline_test.go b/bundle/trampoline/trampoline_test.go index 4682d8fa0..6e6b8db48 100644 --- a/bundle/trampoline/trampoline_test.go +++ b/bundle/trampoline/trampoline_test.go @@ -2,7 +2,7 @@ package trampoline import ( "context" - "fmt" + "errors" "os" "path/filepath" "testing" @@ -30,7 +30,7 @@ func (f *functions) GetTasks(b *bundle.Bundle) []TaskWithJobKey { func (f *functions) GetTemplateData(task *jobs.Task) (map[string]any, error) { if task.PythonWheelTask == nil { - return nil, fmt.Errorf("PythonWheelTask cannot be nil") + return nil, errors.New("PythonWheelTask cannot be nil") } data := make(map[string]any) @@ -52,7 +52,8 @@ func TestGenerateTrampoline(t *testing.T) { PythonWheelTask: &jobs.PythonWheelTask{ PackageName: "test", EntryPoint: "run", - }}, + }, + }, } b := &bundle.Bundle{ diff --git a/cmd/account/cmd.go b/cmd/account/cmd.go index 9b4bb8139..f34966fd9 100644 --- a/cmd/account/cmd.go +++ b/cmd/account/cmd.go @@ -11,6 +11,7 @@ import ( credentials "github.com/databricks/cli/cmd/account/credentials" custom_app_integration "github.com/databricks/cli/cmd/account/custom-app-integration" encryption_keys "github.com/databricks/cli/cmd/account/encryption-keys" + account_federation_policy "github.com/databricks/cli/cmd/account/federation-policy" account_groups "github.com/databricks/cli/cmd/account/groups" account_ip_access_lists "github.com/databricks/cli/cmd/account/ip-access-lists" log_delivery "github.com/databricks/cli/cmd/account/log-delivery" @@ -21,6 +22,7 @@ import ( o_auth_published_apps "github.com/databricks/cli/cmd/account/o-auth-published-apps" private_access "github.com/databricks/cli/cmd/account/private-access" published_app_integration "github.com/databricks/cli/cmd/account/published-app-integration" + service_principal_federation_policy "github.com/databricks/cli/cmd/account/service-principal-federation-policy" service_principal_secrets "github.com/databricks/cli/cmd/account/service-principal-secrets" account_service_principals "github.com/databricks/cli/cmd/account/service-principals" account_settings "github.com/databricks/cli/cmd/account/settings" @@ -44,6 +46,7 @@ func New() *cobra.Command { cmd.AddCommand(credentials.New()) cmd.AddCommand(custom_app_integration.New()) cmd.AddCommand(encryption_keys.New()) + cmd.AddCommand(account_federation_policy.New()) cmd.AddCommand(account_groups.New()) cmd.AddCommand(account_ip_access_lists.New()) cmd.AddCommand(log_delivery.New()) @@ -54,6 +57,7 @@ 
func New() *cobra.Command { cmd.AddCommand(o_auth_published_apps.New()) cmd.AddCommand(private_access.New()) cmd.AddCommand(published_app_integration.New()) + cmd.AddCommand(service_principal_federation_policy.New()) cmd.AddCommand(service_principal_secrets.New()) cmd.AddCommand(account_service_principals.New()) cmd.AddCommand(account_settings.New()) diff --git a/cmd/account/federation-policy/federation-policy.go b/cmd/account/federation-policy/federation-policy.go new file mode 100755 index 000000000..d78ac709a --- /dev/null +++ b/cmd/account/federation-policy/federation-policy.go @@ -0,0 +1,402 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package federation_policy + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/oauth2" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "federation-policy", + Short: `These APIs manage account federation policies.`, + Long: `These APIs manage account federation policies. + + Account federation policies allow users and service principals in your + Databricks account to securely access Databricks APIs using tokens from your + trusted identity providers (IdPs). + + With token federation, your users and service principals can exchange tokens + from your IdP for Databricks OAuth tokens, which can be used to access + Databricks APIs. Token federation eliminates the need to manage Databricks + secrets, and allows you to centralize management of token issuance policies in + your IdP. Databricks token federation is typically used in combination with + [SCIM], so users in your IdP are synchronized into your Databricks account. + + Token federation is configured in your Databricks account using an account + federation policy. An account federation policy specifies: * which IdP, or + issuer, your Databricks account should accept tokens from * how to determine + which Databricks user, or subject, a token is issued for + + To configure a federation policy, you provide the following: * The required + token __issuer__, as specified in the “iss” claim of your tokens. The + issuer is an https URL that identifies your IdP. * The allowed token + __audiences__, as specified in the “aud” claim of your tokens. This + identifier is intended to represent the recipient of the token. As long as the + audience in the token matches at least one audience in the policy, the token + is considered a match. If unspecified, the default value is your Databricks + account id. * The __subject claim__, which indicates which token claim + contains the Databricks username of the user the token was issued for. If + unspecified, the default value is “sub”. * Optionally, the public keys + used to validate the signature of your tokens, in JWKS format. If unspecified + (recommended), Databricks automatically fetches the public keys from your + issuer’s well known endpoint. Databricks strongly recommends relying on your + issuer’s well known endpoint for discovering public keys. 
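
For illustration, a minimal sketch of creating the account federation policy described above through the Go SDK that backs this command. The oauth2.CreateAccountFederationPolicyRequest and oauth2.FederationPolicy types and the a.FederationPolicy.Create call appear in this diff; the OidcPolicy field, the OidcFederationPolicy type, and its Issuer/Audiences/SubjectClaim members are assumptions about the SDK's oidc_policy shape (the generated flags only mark oidc_policy as a complex arg).

package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/oauth2"
)

func main() {
	ctx := context.Background()

	// Account-level client; credentials and the account ID are resolved from the
	// environment or a configuration profile, as usual for the SDK.
	a, err := databricks.NewAccountClient()
	if err != nil {
		panic(err)
	}

	// Mirrors the fields described in the help text: issuer, audiences, subject claim.
	policy := &oauth2.FederationPolicy{
		Name:        "my-idp-policy",
		Description: "Tokens issued by idp.mycompany.com",
		// The oidc_policy shape below is assumed, not taken from this diff.
		OidcPolicy: &oauth2.OidcFederationPolicy{
			Issuer:       "https://idp.mycompany.com/oidc",
			Audiences:    []string{"databricks"},
			SubjectClaim: "sub",
		},
	}

	created, err := a.FederationPolicy.Create(ctx, oauth2.CreateAccountFederationPolicyRequest{
		Policy: policy,
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("created federation policy: %+v\n", created)
}

The equivalent CLI flow would go through the new "databricks account federation-policy create" command introduced in this diff, with the same policy supplied via its --json flag.
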
+ + An example federation policy is: issuer: "https://idp.mycompany.com/oidc" + audiences: ["databricks"] subject_claim: "sub" + + An example JWT token body that matches this policy and could be used to + authenticate to Databricks as user username@mycompany.com is: { "iss": + "https://idp.mycompany.com/oidc", "aud": "databricks", "sub": + "username@mycompany.com" } + + You may also need to configure your IdP to generate tokens for your users to + exchange with Databricks, if your users do not already have the ability to + generate tokens that are compatible with your federation policy. + + You do not need to configure an OAuth application in Databricks to use token + federation. + + [SCIM]: https://docs.databricks.com/admin/users-groups/scim/index.html`, + GroupID: "oauth2", + Annotations: map[string]string{ + "package": "oauth2", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *oauth2.CreateAccountFederationPolicyRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq oauth2.CreateAccountFederationPolicyRequest + createReq.Policy = &oauth2.FederationPolicy{} + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createReq.Policy.Description, "description", createReq.Policy.Description, `Description of the federation policy.`) + cmd.Flags().StringVar(&createReq.Policy.Name, "name", createReq.Policy.Name, `Name of the federation policy.`) + // TODO: complex arg: oidc_policy + + cmd.Use = "create" + cmd.Short = `Create account federation policy.` + cmd.Long = `Create account federation policy.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createJson.Unmarshal(&createReq.Policy) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + + response, err := a.FederationPolicy.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var deleteOverrides []func( + *cobra.Command, + *oauth2.DeleteAccountFederationPolicyRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq oauth2.DeleteAccountFederationPolicyRequest + + // TODO: short flags + + cmd.Use = "delete POLICY_ID" + cmd.Short = `Delete account federation policy.` + cmd.Long = `Delete account federation policy.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + deleteReq.PolicyId = args[0] + + err = a.FederationPolicy.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *oauth2.GetAccountFederationPolicyRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq oauth2.GetAccountFederationPolicyRequest + + // TODO: short flags + + cmd.Use = "get POLICY_ID" + cmd.Short = `Get account federation policy.` + cmd.Long = `Get account federation policy.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + getReq.PolicyId = args[0] + + response, err := a.FederationPolicy.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var listOverrides []func( + *cobra.Command, + *oauth2.ListAccountFederationPoliciesRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq oauth2.ListAccountFederationPoliciesRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list" + cmd.Short = `List account federation policies.` + cmd.Long = `List account federation policies.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + response := a.FederationPolicy.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *oauth2.UpdateAccountFederationPolicyRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq oauth2.UpdateAccountFederationPolicyRequest + updateReq.Policy = &oauth2.FederationPolicy{} + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateReq.Policy.Description, "description", updateReq.Policy.Description, `Description of the federation policy.`) + cmd.Flags().StringVar(&updateReq.Policy.Name, "name", updateReq.Policy.Name, `Name of the federation policy.`) + // TODO: complex arg: oidc_policy + + cmd.Use = "update POLICY_ID UPDATE_MASK" + cmd.Short = `Update account federation policy.` + cmd.Long = `Update account federation policy. + + Arguments: + POLICY_ID: + UPDATE_MASK: Field mask is required to be passed into the PATCH request. Field mask + specifies which fields of the setting payload will be updated. The field + mask needs to be supplied as single string. To specify multiple fields in + the field mask, use comma as the separator (no space).` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq.Policy) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + updateReq.PolicyId = args[0] + updateReq.UpdateMask = args[1] + + response, err := a.FederationPolicy.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. 
+ // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service AccountFederationPolicy diff --git a/cmd/account/service-principal-federation-policy/service-principal-federation-policy.go b/cmd/account/service-principal-federation-policy/service-principal-federation-policy.go new file mode 100755 index 000000000..77f73bcd0 --- /dev/null +++ b/cmd/account/service-principal-federation-policy/service-principal-federation-policy.go @@ -0,0 +1,445 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package service_principal_federation_policy + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/oauth2" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "service-principal-federation-policy", + Short: `These APIs manage service principal federation policies.`, + Long: `These APIs manage service principal federation policies. + + Service principal federation, also known as Workload Identity Federation, + allows your automated workloads running outside of Databricks to securely + access Databricks APIs without the need for Databricks secrets. With Workload + Identity Federation, your application (or workload) authenticates to + Databricks as a Databricks service principal, using tokens provided by the + workload runtime. + + Databricks strongly recommends using Workload Identity Federation to + authenticate to Databricks from automated workloads, over alternatives such as + OAuth client secrets or Personal Access Tokens, whenever possible. Workload + Identity Federation is supported by many popular services, including Github + Actions, Azure DevOps, GitLab, Terraform Cloud, and Kubernetes clusters, among + others. + + Workload identity federation is configured in your Databricks account using a + service principal federation policy. A service principal federation policy + specifies: * which IdP, or issuer, the service principal is allowed to + authenticate from * which workload identity, or subject, is allowed to + authenticate as the Databricks service principal + + To configure a federation policy, you provide the following: * The required + token __issuer__, as specified in the “iss” claim of workload identity + tokens. The issuer is an https URL that identifies the workload identity + provider. * The required token __subject__, as specified in the “sub” + claim of workload identity tokens. The subject uniquely identifies the + workload in the workload runtime environment. * The allowed token + __audiences__, as specified in the “aud” claim of workload identity + tokens. The audience is intended to represent the recipient of the token. As + long as the audience in the token matches at least one audience in the policy, + the token is considered a match. If unspecified, the default value is your + Databricks account id. * Optionally, the public keys used to validate the + signature of the workload identity tokens, in JWKS format. 
If unspecified + (recommended), Databricks automatically fetches the public keys from the + issuer’s well known endpoint. Databricks strongly recommends relying on the + issuer’s well known endpoint for discovering public keys. + + An example service principal federation policy, for a Github Actions workload, + is: issuer: "https://token.actions.githubusercontent.com" audiences: + ["https://github.com/my-github-org"] subject: + "repo:my-github-org/my-repo:environment:prod" + + An example JWT token body that matches this policy and could be used to + authenticate to Databricks is: { "iss": + "https://token.actions.githubusercontent.com", "aud": + "https://github.com/my-github-org", "sub": + "repo:my-github-org/my-repo:environment:prod" } + + You may also need to configure the workload runtime to generate tokens for + your workloads. + + You do not need to configure an OAuth application in Databricks to use token + federation.`, + GroupID: "oauth2", + Annotations: map[string]string{ + "package": "oauth2", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *oauth2.CreateServicePrincipalFederationPolicyRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq oauth2.CreateServicePrincipalFederationPolicyRequest + createReq.Policy = &oauth2.FederationPolicy{} + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createReq.Policy.Description, "description", createReq.Policy.Description, `Description of the federation policy.`) + cmd.Flags().StringVar(&createReq.Policy.Name, "name", createReq.Policy.Name, `Name of the federation policy.`) + // TODO: complex arg: oidc_policy + + cmd.Use = "create SERVICE_PRINCIPAL_ID" + cmd.Short = `Create service principal federation policy.` + cmd.Long = `Create service principal federation policy. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal id for the federation policy.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createJson.Unmarshal(&createReq.Policy) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + _, err = fmt.Sscan(args[0], &createReq.ServicePrincipalId) + if err != nil { + return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0]) + } + + response, err := a.ServicePrincipalFederationPolicy.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. 
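
Similarly, a sketch (for illustration only) of creating the GitHub Actions policy from the example above with the SDK call this command wraps. The CreateServicePrincipalFederationPolicyRequest type, its ServicePrincipalId and Policy fields, and the a.ServicePrincipalFederationPolicy.Create call are visible in this diff; the OidcPolicy/OidcFederationPolicy shape, the int64 service principal ID, and the (*oauth2.FederationPolicy, error) return type are assumptions inferred from the generated command.

package example

import (
	"context"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/oauth2"
)

// createGitHubActionsPolicy registers a workload identity federation policy that
// lets the prod environment of one repository authenticate as the given service
// principal, mirroring the example issuer/audience/subject in the help text.
func createGitHubActionsPolicy(ctx context.Context, a *databricks.AccountClient, servicePrincipalID int64) (*oauth2.FederationPolicy, error) {
	return a.ServicePrincipalFederationPolicy.Create(ctx, oauth2.CreateServicePrincipalFederationPolicyRequest{
		ServicePrincipalId: servicePrincipalID,
		Policy: &oauth2.FederationPolicy{
			Name: "github-actions-prod",
			// The oidc_policy shape below is assumed, not taken from this diff.
			OidcPolicy: &oauth2.OidcFederationPolicy{
				Issuer:    "https://token.actions.githubusercontent.com",
				Audiences: []string{"https://github.com/my-github-org"},
				Subject:   "repo:my-github-org/my-repo:environment:prod",
			},
		},
	})
}
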
+ // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *oauth2.DeleteServicePrincipalFederationPolicyRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq oauth2.DeleteServicePrincipalFederationPolicyRequest + + // TODO: short flags + + cmd.Use = "delete SERVICE_PRINCIPAL_ID POLICY_ID" + cmd.Short = `Delete service principal federation policy.` + cmd.Long = `Delete service principal federation policy. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal id for the federation policy. + POLICY_ID: ` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + _, err = fmt.Sscan(args[0], &deleteReq.ServicePrincipalId) + if err != nil { + return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0]) + } + deleteReq.PolicyId = args[1] + + err = a.ServicePrincipalFederationPolicy.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *oauth2.GetServicePrincipalFederationPolicyRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq oauth2.GetServicePrincipalFederationPolicyRequest + + // TODO: short flags + + cmd.Use = "get SERVICE_PRINCIPAL_ID POLICY_ID" + cmd.Short = `Get service principal federation policy.` + cmd.Long = `Get service principal federation policy. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal id for the federation policy. + POLICY_ID: ` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + _, err = fmt.Sscan(args[0], &getReq.ServicePrincipalId) + if err != nil { + return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0]) + } + getReq.PolicyId = args[1] + + response, err := a.ServicePrincipalFederationPolicy.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *oauth2.ListServicePrincipalFederationPoliciesRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq oauth2.ListServicePrincipalFederationPoliciesRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list SERVICE_PRINCIPAL_ID" + cmd.Short = `List service principal federation policies.` + cmd.Long = `List service principal federation policies. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal id for the federation policy.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + _, err = fmt.Sscan(args[0], &listReq.ServicePrincipalId) + if err != nil { + return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0]) + } + + response := a.ServicePrincipalFederationPolicy.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *oauth2.UpdateServicePrincipalFederationPolicyRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq oauth2.UpdateServicePrincipalFederationPolicyRequest + updateReq.Policy = &oauth2.FederationPolicy{} + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateReq.Policy.Description, "description", updateReq.Policy.Description, `Description of the federation policy.`) + cmd.Flags().StringVar(&updateReq.Policy.Name, "name", updateReq.Policy.Name, `Name of the federation policy.`) + // TODO: complex arg: oidc_policy + + cmd.Use = "update SERVICE_PRINCIPAL_ID POLICY_ID UPDATE_MASK" + cmd.Short = `Update service principal federation policy.` + cmd.Long = `Update service principal federation policy. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal id for the federation policy. + POLICY_ID: + UPDATE_MASK: Field mask is required to be passed into the PATCH request. Field mask + specifies which fields of the setting payload will be updated. The field + mask needs to be supplied as single string. 
To specify multiple fields in + the field mask, use comma as the separator (no space).` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq.Policy) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + _, err = fmt.Sscan(args[0], &updateReq.ServicePrincipalId) + if err != nil { + return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0]) + } + updateReq.PolicyId = args[1] + updateReq.UpdateMask = args[2] + + response, err := a.ServicePrincipalFederationPolicy.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service ServicePrincipalFederationPolicy diff --git a/cmd/api/api.go b/cmd/api/api.go index d33939a52..c3a3eb0b6 100644 --- a/cmd/api/api.go +++ b/cmd/api/api.go @@ -39,7 +39,7 @@ func makeCommand(method string) *cobra.Command { Args: root.ExactArgs(1), Short: fmt.Sprintf("Perform %s request", method), RunE: func(cmd *cobra.Command, args []string) error { - var path = args[0] + path := args[0] var request any diags := payload.Unmarshal(&request) diff --git a/cmd/auth/auth.go b/cmd/auth/auth.go index ceceae25c..4261e93e7 100644 --- a/cmd/auth/auth.go +++ b/cmd/auth/auth.go @@ -2,7 +2,7 @@ package auth import ( "context" - "fmt" + "errors" "github.com/databricks/cli/libs/auth" "github.com/databricks/cli/libs/cmdio" @@ -36,7 +36,7 @@ GCP: https://docs.gcp.databricks.com/dev-tools/auth/index.html`, func promptForHost(ctx context.Context) (string, error) { if !cmdio.IsInTTY(ctx) { - return "", fmt.Errorf("the command is being run in a non-interactive environment, please specify a host using --host") + return "", errors.New("the command is being run in a non-interactive environment, please specify a host using --host") } prompt := cmdio.Prompt(ctx) @@ -46,7 +46,7 @@ func promptForHost(ctx context.Context) (string, error) { func promptForAccountID(ctx context.Context) (string, error) { if !cmdio.IsInTTY(ctx) { - return "", fmt.Errorf("the command is being run in a non-interactive environment, please specify an account ID using --account-id") + return "", errors.New("the command is being run in a non-interactive environment, please specify an account ID using --account-id") } prompt := cmdio.Prompt(ctx) diff --git a/cmd/auth/describe.go b/cmd/auth/describe.go index 3a6e3d5d7..faaf64f8f 100644 --- a/cmd/auth/describe.go +++ b/cmd/auth/describe.go @@ -59,7 +59,6 @@ func newDescribeCommand() *cobra.Command { isAccount, err := root.MustAnyClient(cmd, args) return root.ConfigUsed(cmd.Context()), isAccount, err }) - if err != nil { return err } @@ -141,7 +140,10 @@ func render(ctx context.Context, cmd *cobra.Command, status *authStatus, templat if err != nil { return err } - cmd.OutOrStdout().Write(buf) + _, err = cmd.OutOrStdout().Write(buf) + if err != nil { + return 
err + } default: return fmt.Errorf("unknown output type %s", root.OutputType(cmd)) } diff --git a/cmd/auth/describe_test.go b/cmd/auth/describe_test.go index d0260abc7..35e0c6e64 100644 --- a/cmd/auth/describe_test.go +++ b/cmd/auth/describe_test.go @@ -2,7 +2,7 @@ package auth import ( "context" - "fmt" + "errors" "testing" "github.com/databricks/cli/cmd/root" @@ -31,7 +31,8 @@ func TestGetWorkspaceAuthStatus(t *testing.T) { cmd.Flags().String("host", "", "") cmd.Flags().String("profile", "", "") - cmd.Flag("profile").Value.Set("my-profile") + err := cmd.Flag("profile").Value.Set("my-profile") + require.NoError(t, err) cmd.Flag("profile").Changed = true cfg := &config.Config{ @@ -39,14 +40,16 @@ func TestGetWorkspaceAuthStatus(t *testing.T) { } m.WorkspaceClient.Config = cfg t.Setenv("DATABRICKS_AUTH_TYPE", "azure-cli") - config.ConfigAttributes.Configure(cfg) + err = config.ConfigAttributes.Configure(cfg) + require.NoError(t, err) status, err := getAuthStatus(cmd, []string{}, showSensitive, func(cmd *cobra.Command, args []string) (*config.Config, bool, error) { - config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ + err := config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ "host": "https://test.com", "token": "test-token", "auth_type": "azure-cli", }) + require.NoError(t, err) return cfg, false, nil }) require.NoError(t, err) @@ -81,7 +84,8 @@ func TestGetWorkspaceAuthStatusError(t *testing.T) { cmd.Flags().String("host", "", "") cmd.Flags().String("profile", "", "") - cmd.Flag("profile").Value.Set("my-profile") + err := cmd.Flag("profile").Value.Set("my-profile") + require.NoError(t, err) cmd.Flag("profile").Changed = true cfg := &config.Config{ @@ -89,15 +93,16 @@ func TestGetWorkspaceAuthStatusError(t *testing.T) { } m.WorkspaceClient.Config = cfg t.Setenv("DATABRICKS_AUTH_TYPE", "azure-cli") - config.ConfigAttributes.Configure(cfg) + err = config.ConfigAttributes.Configure(cfg) + require.NoError(t, err) status, err := getAuthStatus(cmd, []string{}, showSensitive, func(cmd *cobra.Command, args []string) (*config.Config, bool, error) { - config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ + err = config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ "host": "https://test.com", "token": "test-token", "auth_type": "azure-cli", }) - return cfg, false, fmt.Errorf("auth error") + return cfg, false, errors.New("auth error") }) require.NoError(t, err) require.NotNil(t, status) @@ -128,7 +133,8 @@ func TestGetWorkspaceAuthStatusSensitive(t *testing.T) { cmd.Flags().String("host", "", "") cmd.Flags().String("profile", "", "") - cmd.Flag("profile").Value.Set("my-profile") + err := cmd.Flag("profile").Value.Set("my-profile") + require.NoError(t, err) cmd.Flag("profile").Changed = true cfg := &config.Config{ @@ -136,15 +142,16 @@ func TestGetWorkspaceAuthStatusSensitive(t *testing.T) { } m.WorkspaceClient.Config = cfg t.Setenv("DATABRICKS_AUTH_TYPE", "azure-cli") - config.ConfigAttributes.Configure(cfg) + err = config.ConfigAttributes.Configure(cfg) + require.NoError(t, err) status, err := getAuthStatus(cmd, []string{}, showSensitive, func(cmd *cobra.Command, args []string) (*config.Config, bool, error) { - config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ + err = config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ "host": "https://test.com", "token": "test-token", "auth_type": "azure-cli", }) - return cfg, false, fmt.Errorf("auth error") + return cfg, false, errors.New("auth error") }) 
require.NoError(t, err) require.NotNil(t, status) @@ -171,7 +178,8 @@ func TestGetAccountAuthStatus(t *testing.T) { cmd.Flags().String("host", "", "") cmd.Flags().String("profile", "", "") - cmd.Flag("profile").Value.Set("my-profile") + err := cmd.Flag("profile").Value.Set("my-profile") + require.NoError(t, err) cmd.Flag("profile").Changed = true cfg := &config.Config{ @@ -179,13 +187,14 @@ func TestGetAccountAuthStatus(t *testing.T) { } m.AccountClient.Config = cfg t.Setenv("DATABRICKS_AUTH_TYPE", "azure-cli") - config.ConfigAttributes.Configure(cfg) + err = config.ConfigAttributes.Configure(cfg) + require.NoError(t, err) wsApi := m.GetMockWorkspacesAPI() wsApi.EXPECT().List(mock.Anything).Return(nil, nil) status, err := getAuthStatus(cmd, []string{}, showSensitive, func(cmd *cobra.Command, args []string) (*config.Config, bool, error) { - config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ + err = config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ "account_id": "test-account-id", "username": "test-user", "host": "https://test.com", diff --git a/cmd/auth/env.go b/cmd/auth/env.go index e72d15399..11149af8c 100644 --- a/cmd/auth/env.go +++ b/cmd/auth/env.go @@ -23,9 +23,9 @@ func canonicalHost(host string) (string, error) { } // If the host is empty, assume the scheme wasn't included. if parsedHost.Host == "" { - return fmt.Sprintf("https://%s", host), nil + return "https://" + host, nil } - return fmt.Sprintf("https://%s", parsedHost.Host), nil + return "https://" + parsedHost.Host, nil } var ErrNoMatchingProfiles = errors.New("no matching profiles found") @@ -138,7 +138,7 @@ func newEnvCommand() *cobra.Command { if err != nil { return err } - cmd.OutOrStdout().Write(raw) + _, _ = cmd.OutOrStdout().Write(raw) return nil } diff --git a/cmd/auth/login.go b/cmd/auth/login.go index 79b795468..a6d0bf4cc 100644 --- a/cmd/auth/login.go +++ b/cmd/auth/login.go @@ -29,8 +29,10 @@ func promptForProfile(ctx context.Context, defaultValue string) (string, error) return prompt.Run() } -const minimalDbConnectVersion = "13.1" -const defaultTimeout = 1 * time.Hour +const ( + minimalDbConnectVersion = "13.1" + defaultTimeout = 1 * time.Hour +) func newLoginCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { defaultConfigPath := "~/.databrickscfg" @@ -174,7 +176,7 @@ depends on the existing profiles you have set in your configuration file func setHostAndAccountId(ctx context.Context, profileName string, persistentAuth *auth.PersistentAuth, args []string) error { // If both [HOST] and --host are provided, return an error. 
if len(args) > 0 && persistentAuth.Host != "" { - return fmt.Errorf("please only provide a host as an argument or a flag, not both") + return errors.New("please only provide a host as an argument or a flag, not both") } profiler := profile.GetProfiler(ctx) diff --git a/cmd/auth/token.go b/cmd/auth/token.go index 3f9af43fa..fbf8b68f6 100644 --- a/cmd/auth/token.go +++ b/cmd/auth/token.go @@ -94,7 +94,7 @@ func newTokenCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { if err != nil { return err } - cmd.OutOrStdout().Write(raw) + _, _ = cmd.OutOrStdout().Write(raw) return nil } diff --git a/cmd/bundle/debug/terraform.go b/cmd/bundle/debug/terraform.go index 843ecac4e..c7d49ebb2 100644 --- a/cmd/bundle/debug/terraform.go +++ b/cmd/bundle/debug/terraform.go @@ -60,13 +60,13 @@ For more information about filesystem mirrors, see the Terraform documentation: } switch root.OutputType(cmd) { case flags.OutputText: - cmdio.Render(cmd.Context(), dependencies.Terraform) + _ = cmdio.Render(cmd.Context(), dependencies.Terraform) case flags.OutputJSON: buf, err := json.MarshalIndent(dependencies, "", " ") if err != nil { return err } - cmd.OutOrStdout().Write(buf) + _, _ = cmd.OutOrStdout().Write(buf) default: return fmt.Errorf("unknown output type %s", root.OutputType(cmd)) } diff --git a/cmd/bundle/deploy.go b/cmd/bundle/deploy.go index a25e02f6c..560b07e39 100644 --- a/cmd/bundle/deploy.go +++ b/cmd/bundle/deploy.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/validate" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/bundle/render" "github.com/databricks/cli/cmd/bundle/utils" @@ -71,6 +72,7 @@ func newDeployCommand() *cobra.Command { diags = diags.Extend( bundle.Apply(ctx, b, bundle.Seq( phases.Initialize(), + validate.FastValidate(), phases.Build(), phases.Deploy(outputHandler), )), diff --git a/cmd/bundle/destroy.go b/cmd/bundle/destroy.go index 711abbcd7..0b2f14875 100644 --- a/cmd/bundle/destroy.go +++ b/cmd/bundle/destroy.go @@ -2,7 +2,7 @@ package bundle import ( "context" - "fmt" + "errors" "os" "github.com/databricks/cli/bundle" @@ -49,16 +49,16 @@ func newDestroyCommand() *cobra.Command { // we require auto-approve for non tty terminals since interactive consent // is not possible if !term.IsTerminal(int(os.Stderr.Fd())) && !autoApprove { - return fmt.Errorf("please specify --auto-approve to skip interactive confirmation checks for non tty consoles") + return errors.New("please specify --auto-approve to skip interactive confirmation checks for non tty consoles") } // Check auto-approve is selected for json logging logger, ok := cmdio.FromContext(ctx) if !ok { - return fmt.Errorf("progress logger not found") + return errors.New("progress logger not found") } if logger.Mode == flags.ModeJson && !autoApprove { - return fmt.Errorf("please specify --auto-approve since selected logging format is json") + return errors.New("please specify --auto-approve since selected logging format is json") } diags = bundle.Apply(ctx, b, bundle.Seq( diff --git a/cmd/bundle/generate/dashboard.go b/cmd/bundle/generate/dashboard.go index 4a538a293..fa3c91b2a 100644 --- a/cmd/bundle/generate/dashboard.go +++ b/cmd/bundle/generate/dashboard.go @@ -96,7 +96,7 @@ func (d *dashboard) resolveFromPath(ctx context.Context, b *bundle.Bundle) (stri return "", diag.Diagnostics{ { Severity: diag.Error, - Summary: fmt.Sprintf("expected a dashboard, found a %s", found), + Summary: "expected a dashboard, found a " + found, }, } } @@ 
-158,7 +158,7 @@ func (d *dashboard) saveSerializedDashboard(_ context.Context, b *bundle.Bundle, } // Make sure the output directory exists. - if err := os.MkdirAll(filepath.Dir(filename), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(filename), 0o755); err != nil { return err } @@ -183,12 +183,12 @@ func (d *dashboard) saveSerializedDashboard(_ context.Context, b *bundle.Bundle, } fmt.Printf("Writing dashboard to %q\n", rel) - return os.WriteFile(filename, data, 0644) + return os.WriteFile(filename, data, 0o644) } func (d *dashboard) saveConfiguration(ctx context.Context, b *bundle.Bundle, dashboard *dashboards.Dashboard, key string) error { // Save serialized dashboard definition to the dashboard directory. - dashboardBasename := fmt.Sprintf("%s.lvdash.json", key) + dashboardBasename := key + ".lvdash.json" dashboardPath := filepath.Join(d.dashboardDir, dashboardBasename) err := d.saveSerializedDashboard(ctx, b, dashboard, dashboardPath) if err != nil { @@ -210,12 +210,12 @@ func (d *dashboard) saveConfiguration(ctx context.Context, b *bundle.Bundle, das } // Make sure the output directory exists. - if err := os.MkdirAll(d.resourceDir, 0755); err != nil { + if err := os.MkdirAll(d.resourceDir, 0o755); err != nil { return err } // Save the configuration to the resource directory. - resourcePath := filepath.Join(d.resourceDir, fmt.Sprintf("%s.dashboard.yml", key)) + resourcePath := filepath.Join(d.resourceDir, key+".dashboard.yml") saver := yamlsaver.NewSaverWithStyle(map[string]yaml.Style{ "display_name": yaml.DoubleQuotedStyle, }) diff --git a/cmd/bundle/generate/dashboard_test.go b/cmd/bundle/generate/dashboard_test.go index 6741e6a39..33a463ea0 100644 --- a/cmd/bundle/generate/dashboard_test.go +++ b/cmd/bundle/generate/dashboard_test.go @@ -44,7 +44,7 @@ func TestDashboard_ErrorOnLegacyDashboard(t *testing.T) { _, diags := d.resolveID(ctx, b) require.Len(t, diags, 1) - assert.Equal(t, diags[0].Summary, "dashboard \"legacy dashboard\" is a legacy dashboard") + assert.Equal(t, "dashboard \"legacy dashboard\" is a legacy dashboard", diags[0].Summary) } func TestDashboard_ExistingID_Nominal(t *testing.T) { @@ -67,9 +67,10 @@ func TestDashboard_ExistingID_Nominal(t *testing.T) { ctx := bundle.Context(context.Background(), b) cmd := NewGenerateDashboardCommand() cmd.SetContext(ctx) - cmd.Flag("existing-id").Value.Set("f00dcafe") + err := cmd.Flag("existing-id").Value.Set("f00dcafe") + require.NoError(t, err) - err := cmd.RunE(cmd, []string{}) + err = cmd.RunE(cmd, []string{}) require.NoError(t, err) // Assert the contents of the generated configuration @@ -105,9 +106,10 @@ func TestDashboard_ExistingID_NotFound(t *testing.T) { ctx := bundle.Context(context.Background(), b) cmd := NewGenerateDashboardCommand() cmd.SetContext(ctx) - cmd.Flag("existing-id").Value.Set("f00dcafe") + err := cmd.Flag("existing-id").Value.Set("f00dcafe") + require.NoError(t, err) - err := cmd.RunE(cmd, []string{}) + err = cmd.RunE(cmd, []string{}) require.Error(t, err) } @@ -137,9 +139,10 @@ func TestDashboard_ExistingPath_Nominal(t *testing.T) { ctx := bundle.Context(context.Background(), b) cmd := NewGenerateDashboardCommand() cmd.SetContext(ctx) - cmd.Flag("existing-path").Value.Set("/path/to/dashboard") + err := cmd.Flag("existing-path").Value.Set("/path/to/dashboard") + require.NoError(t, err) - err := cmd.RunE(cmd, []string{}) + err = cmd.RunE(cmd, []string{}) require.NoError(t, err) // Assert the contents of the generated configuration @@ -175,8 +178,9 @@ func 
TestDashboard_ExistingPath_NotFound(t *testing.T) { ctx := bundle.Context(context.Background(), b) cmd := NewGenerateDashboardCommand() cmd.SetContext(ctx) - cmd.Flag("existing-path").Value.Set("/path/to/dashboard") + err := cmd.Flag("existing-path").Value.Set("/path/to/dashboard") + require.NoError(t, err) - err := cmd.RunE(cmd, []string{}) + err = cmd.RunE(cmd, []string{}) require.Error(t, err) } diff --git a/cmd/bundle/generate/generate_test.go b/cmd/bundle/generate/generate_test.go index bc1549e64..395d4ebd4 100644 --- a/cmd/bundle/generate/generate_test.go +++ b/cmd/bundle/generate/generate_test.go @@ -3,7 +3,6 @@ package generate import ( "bytes" "context" - "errors" "fmt" "io" "io/fs" @@ -78,13 +77,13 @@ func TestGeneratePipelineCommand(t *testing.T) { workspaceApi.EXPECT().Download(mock.Anything, "/test/file.py", mock.Anything).Return(pyContent, nil) cmd.SetContext(bundle.Context(context.Background(), b)) - cmd.Flag("existing-pipeline-id").Value.Set("test-pipeline") + require.NoError(t, cmd.Flag("existing-pipeline-id").Value.Set("test-pipeline")) configDir := filepath.Join(root, "resources") - cmd.Flag("config-dir").Value.Set(configDir) + require.NoError(t, cmd.Flag("config-dir").Value.Set(configDir)) srcDir := filepath.Join(root, "src") - cmd.Flag("source-dir").Value.Set(srcDir) + require.NoError(t, cmd.Flag("source-dir").Value.Set(srcDir)) var key string cmd.Flags().StringVar(&key, "key", "test_pipeline", "") @@ -174,13 +173,13 @@ func TestGenerateJobCommand(t *testing.T) { workspaceApi.EXPECT().Download(mock.Anything, "/test/notebook", mock.Anything).Return(notebookContent, nil) cmd.SetContext(bundle.Context(context.Background(), b)) - cmd.Flag("existing-job-id").Value.Set("1234") + require.NoError(t, cmd.Flag("existing-job-id").Value.Set("1234")) configDir := filepath.Join(root, "resources") - cmd.Flag("config-dir").Value.Set(configDir) + require.NoError(t, cmd.Flag("config-dir").Value.Set(configDir)) srcDir := filepath.Join(root, "src") - cmd.Flag("source-dir").Value.Set(srcDir) + require.NoError(t, cmd.Flag("source-dir").Value.Set(srcDir)) var key string cmd.Flags().StringVar(&key, "key", "test_job", "") @@ -217,7 +216,7 @@ func TestGenerateJobCommand(t *testing.T) { } func touchEmptyFile(t *testing.T, path string) { - err := os.MkdirAll(filepath.Dir(path), 0700) + err := os.MkdirAll(filepath.Dir(path), 0o700) require.NoError(t, err) f, err := os.Create(path) require.NoError(t, err) @@ -279,13 +278,13 @@ func TestGenerateJobCommandOldFileRename(t *testing.T) { workspaceApi.EXPECT().Download(mock.Anything, "/test/notebook", mock.Anything).Return(notebookContent, nil) cmd.SetContext(bundle.Context(context.Background(), b)) - cmd.Flag("existing-job-id").Value.Set("1234") + require.NoError(t, cmd.Flag("existing-job-id").Value.Set("1234")) configDir := filepath.Join(root, "resources") - cmd.Flag("config-dir").Value.Set(configDir) + require.NoError(t, cmd.Flag("config-dir").Value.Set(configDir)) srcDir := filepath.Join(root, "src") - cmd.Flag("source-dir").Value.Set(srcDir) + require.NoError(t, cmd.Flag("source-dir").Value.Set(srcDir)) var key string cmd.Flags().StringVar(&key, "key", "test_job", "") @@ -295,14 +294,14 @@ func TestGenerateJobCommandOldFileRename(t *testing.T) { touchEmptyFile(t, oldFilename) // Having an existing files require --force flag to regenerate them - cmd.Flag("force").Value.Set("true") + require.NoError(t, cmd.Flag("force").Value.Set("true")) err := cmd.RunE(cmd, []string{}) require.NoError(t, err) // Make sure file do not exists after the run _, err = 
os.Stat(oldFilename) - require.True(t, errors.Is(err, fs.ErrNotExist)) + require.ErrorIs(t, err, fs.ErrNotExist) data, err := os.ReadFile(filepath.Join(configDir, "test_job.job.yml")) require.NoError(t, err) diff --git a/cmd/bundle/generate/job.go b/cmd/bundle/generate/job.go index 9ac41e3cb..827d270e5 100644 --- a/cmd/bundle/generate/job.go +++ b/cmd/bundle/generate/job.go @@ -85,8 +85,8 @@ func NewGenerateJobCommand() *cobra.Command { return err } - oldFilename := filepath.Join(configDir, fmt.Sprintf("%s.yml", jobKey)) - filename := filepath.Join(configDir, fmt.Sprintf("%s.job.yml", jobKey)) + oldFilename := filepath.Join(configDir, jobKey+".yml") + filename := filepath.Join(configDir, jobKey+".job.yml") // User might continuously run generate command to update their bundle jobs with any changes made in Databricks UI. // Due to changing in the generated file names, we need to first rename existing resource file to the new name. @@ -107,7 +107,7 @@ func NewGenerateJobCommand() *cobra.Command { return err } - cmdio.LogString(ctx, fmt.Sprintf("Job configuration successfully saved to %s", filename)) + cmdio.LogString(ctx, "Job configuration successfully saved to "+filename) return nil } diff --git a/cmd/bundle/generate/pipeline.go b/cmd/bundle/generate/pipeline.go index 910baa45f..863b0b2f7 100644 --- a/cmd/bundle/generate/pipeline.go +++ b/cmd/bundle/generate/pipeline.go @@ -85,8 +85,8 @@ func NewGeneratePipelineCommand() *cobra.Command { return err } - oldFilename := filepath.Join(configDir, fmt.Sprintf("%s.yml", pipelineKey)) - filename := filepath.Join(configDir, fmt.Sprintf("%s.pipeline.yml", pipelineKey)) + oldFilename := filepath.Join(configDir, pipelineKey+".yml") + filename := filepath.Join(configDir, pipelineKey+".pipeline.yml") // User might continuously run generate command to update their bundle jobs with any changes made in Databricks UI. // Due to changing in the generated file names, we need to first rename existing resource file to the new name. 
@@ -109,7 +109,7 @@ func NewGeneratePipelineCommand() *cobra.Command { return err } - cmdio.LogString(ctx, fmt.Sprintf("Pipeline configuration successfully saved to %s", filename)) + cmdio.LogString(ctx, "Pipeline configuration successfully saved to "+filename) return nil } diff --git a/cmd/bundle/generate/utils.go b/cmd/bundle/generate/utils.go index 65f692419..dbfad9438 100644 --- a/cmd/bundle/generate/utils.go +++ b/cmd/bundle/generate/utils.go @@ -87,7 +87,7 @@ func (n *downloader) markNotebookForDownload(ctx context.Context, notebookPath * } func (n *downloader) FlushToDisk(ctx context.Context, force bool) error { - err := os.MkdirAll(n.sourceDir, 0755) + err := os.MkdirAll(n.sourceDir, 0o755) if err != nil { return err } @@ -126,7 +126,7 @@ func (n *downloader) FlushToDisk(ctx context.Context, force bool) error { return err } - cmdio.LogString(errCtx, fmt.Sprintf("File successfully saved to %s", targetPath)) + cmdio.LogString(errCtx, "File successfully saved to "+targetPath) return reader.Close() }) } @@ -134,7 +134,7 @@ func (n *downloader) FlushToDisk(ctx context.Context, force bool) error { return errs.Wait() } -func newDownloader(w *databricks.WorkspaceClient, sourceDir string, configDir string) *downloader { +func newDownloader(w *databricks.WorkspaceClient, sourceDir, configDir string) *downloader { return &downloader{ files: make(map[string]string), w: w, diff --git a/cmd/bundle/launch.go b/cmd/bundle/launch.go index 0d2b4233b..3fea839c9 100644 --- a/cmd/bundle/launch.go +++ b/cmd/bundle/launch.go @@ -1,7 +1,7 @@ package bundle import ( - "fmt" + "errors" "github.com/databricks/cli/cmd/root" "github.com/spf13/cobra" @@ -19,7 +19,7 @@ func newLaunchCommand() *cobra.Command { } cmd.RunE = func(cmd *cobra.Command, args []string) error { - return fmt.Errorf("TODO") + return errors.New("TODO") // contents, err := os.ReadFile(args[0]) // if err != nil { // return err diff --git a/cmd/bundle/open.go b/cmd/bundle/open.go index a2ad32fd8..5a26e1ea7 100644 --- a/cmd/bundle/open.go +++ b/cmd/bundle/open.go @@ -44,7 +44,7 @@ func resolveOpenArgument(ctx context.Context, b *bundle.Bundle, args []string) ( } if len(args) < 1 { - return "", fmt.Errorf("expected a KEY of the resource to open") + return "", errors.New("expected a KEY of the resource to open") } return args[0], nil @@ -113,7 +113,7 @@ func newOpenCommand() *cobra.Command { // Confirm that the resource has a URL. url := ref.Resource.GetURL() if url == "" { - return fmt.Errorf("resource does not have a URL associated with it (has it been deployed?)") + return errors.New("resource does not have a URL associated with it (has it been deployed?)") } return browser.OpenURL(url) diff --git a/cmd/bundle/run.go b/cmd/bundle/run.go index 7a92766d9..df35d7222 100644 --- a/cmd/bundle/run.go +++ b/cmd/bundle/run.go @@ -3,6 +3,7 @@ package bundle import ( "context" "encoding/json" + "errors" "fmt" "github.com/databricks/cli/bundle" @@ -48,7 +49,7 @@ func resolveRunArgument(ctx context.Context, b *bundle.Bundle, args []string) (s } if len(args) < 1 { - return "", nil, fmt.Errorf("expected a KEY of the resource to run") + return "", nil, errors.New("expected a KEY of the resource to run") } return args[0], args[1:], nil @@ -159,13 +160,19 @@ task or a Python wheel task, the second example applies. 
if err != nil { return err } - cmd.OutOrStdout().Write([]byte(resultString)) + _, err = cmd.OutOrStdout().Write([]byte(resultString)) + if err != nil { + return err + } case flags.OutputJSON: b, err := json.MarshalIndent(output, "", " ") if err != nil { return err } - cmd.OutOrStdout().Write(b) + _, err = cmd.OutOrStdout().Write(b) + if err != nil { + return err + } default: return fmt.Errorf("unknown output type %s", root.OutputType(cmd)) } diff --git a/cmd/bundle/summary.go b/cmd/bundle/summary.go index 8c34dd612..7c669c845 100644 --- a/cmd/bundle/summary.go +++ b/cmd/bundle/summary.go @@ -73,7 +73,7 @@ func newSummaryCommand() *cobra.Command { if err != nil { return err } - cmd.OutOrStdout().Write(buf) + _, _ = cmd.OutOrStdout().Write(buf) default: return fmt.Errorf("unknown output type %s", root.OutputType(cmd)) } diff --git a/cmd/bundle/test.go b/cmd/bundle/test.go index 4d30e727d..794575220 100644 --- a/cmd/bundle/test.go +++ b/cmd/bundle/test.go @@ -1,7 +1,7 @@ package bundle import ( - "fmt" + "errors" "github.com/spf13/cobra" ) @@ -17,7 +17,7 @@ func newTestCommand() *cobra.Command { } cmd.RunE = func(cmd *cobra.Command, args []string) error { - return fmt.Errorf("TODO") + return errors.New("TODO") // results := project.RunPythonOnDev(cmd.Context(), `return 1`) // if results.Failed() { // return results.Err() diff --git a/cmd/bundle/validate.go b/cmd/bundle/validate.go index 5331e7e7b..41fa87f30 100644 --- a/cmd/bundle/validate.go +++ b/cmd/bundle/validate.go @@ -2,6 +2,7 @@ package bundle import ( "encoding/json" + "errors" "fmt" "github.com/databricks/cli/bundle" @@ -10,18 +11,17 @@ import ( "github.com/databricks/cli/bundle/render" "github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/flags" "github.com/spf13/cobra" ) -func renderJsonOutput(cmd *cobra.Command, b *bundle.Bundle, diags diag.Diagnostics) error { +func renderJsonOutput(cmd *cobra.Command, b *bundle.Bundle) error { buf, err := json.MarshalIndent(b.Config.Value().AsAny(), "", " ") if err != nil { return err } - cmd.OutOrStdout().Write(buf) - return diags.Error() + _, _ = cmd.OutOrStdout().Write(buf) + return nil } func newValidateCommand() *cobra.Command { @@ -39,7 +39,7 @@ func newValidateCommand() *cobra.Command { if err := diags.Error(); err != nil { return diags.Error() } else { - return fmt.Errorf("invariant failed: returned bundle is nil") + return errors.New("invariant failed: returned bundle is nil") } } @@ -65,7 +65,23 @@ func newValidateCommand() *cobra.Command { return nil case flags.OutputJSON: - return renderJsonOutput(cmd, b, diags) + renderOpts := render.RenderOptions{RenderSummaryTable: false} + err1 := render.RenderDiagnostics(cmd.ErrOrStderr(), b, diags, renderOpts) + err2 := renderJsonOutput(cmd, b) + + if err2 != nil { + return err2 + } + + if err1 != nil { + return err1 + } + + if diags.HasError() { + return root.ErrAlreadyPrinted + } + + return nil default: return fmt.Errorf("unknown output type %s", root.OutputType(cmd)) } diff --git a/cmd/configure/configure.go b/cmd/configure/configure.go index 895a5902c..4a6568d06 100644 --- a/cmd/configure/configure.go +++ b/cmd/configure/configure.go @@ -1,6 +1,7 @@ package configure import ( + "errors" "fmt" "github.com/databricks/cli/libs/cmdio" @@ -62,12 +63,12 @@ func configureInteractive(cmd *cobra.Command, flags *configureFlags, cfg *config func configureNonInteractive(cmd *cobra.Command, flags *configureFlags, cfg *config.Config) error { if cfg.Host 
== "" { - return fmt.Errorf("host must be set in non-interactive mode") + return errors.New("host must be set in non-interactive mode") } // Check presence of cluster ID before reading token to fail fast. if flags.ConfigureCluster && cfg.ClusterID == "" { - return fmt.Errorf("cluster ID must be set in non-interactive mode") + return errors.New("cluster ID must be set in non-interactive mode") } // Read token from stdin if not already set. diff --git a/cmd/configure/configure_test.go b/cmd/configure/configure_test.go index a127fe57a..14eb0674a 100644 --- a/cmd/configure/configure_test.go +++ b/cmd/configure/configure_test.go @@ -31,7 +31,7 @@ func setup(t *testing.T) string { return tempHomeDir } -func getTempFileWithContent(t *testing.T, tempHomeDir string, content string) *os.File { +func getTempFileWithContent(t *testing.T, tempHomeDir, content string) *os.File { inp, err := os.CreateTemp(tempHomeDir, "input") assert.NoError(t, err) _, err = inp.WriteString(content) @@ -75,7 +75,7 @@ func TestDefaultConfigureNoInteractive(t *testing.T) { } func TestConfigFileFromEnvNoInteractive(t *testing.T) { - //TODO: Replace with similar test code from go SDK, once we start using it directly + // TODO: Replace with similar test code from go SDK, once we start using it directly ctx := context.Background() tempHomeDir := setup(t) defaultCfgPath := filepath.Join(tempHomeDir, ".databrickscfg") @@ -148,9 +148,9 @@ func TestEnvVarsConfigureNoInteractive(t *testing.T) { // We should only save host and token for a profile, other env variables should not be saved _, err = defaultSection.GetKey("auth_type") - assert.NotNil(t, err) + assert.Error(t, err) _, err = defaultSection.GetKey("metadata_service_url") - assert.NotNil(t, err) + assert.Error(t, err) } func TestEnvVarsConfigureNoArgsNoInteractive(t *testing.T) { diff --git a/cmd/configure/host.go b/cmd/configure/host.go index 781c12387..0a454c6d1 100644 --- a/cmd/configure/host.go +++ b/cmd/configure/host.go @@ -1,7 +1,7 @@ package configure import ( - "fmt" + "errors" "net/url" ) @@ -11,10 +11,10 @@ func validateHost(s string) error { return err } if u.Host == "" || u.Scheme != "https" { - return fmt.Errorf("must start with https://") + return errors.New("must start with https://") } if u.Path != "" && u.Path != "/" { - return fmt.Errorf("must use empty path") + return errors.New("must use empty path") } return nil } diff --git a/cmd/labs/github/github.go b/cmd/labs/github/github.go index 1dd9fae5e..a67df1022 100644 --- a/cmd/labs/github/github.go +++ b/cmd/labs/github/github.go @@ -12,12 +12,16 @@ import ( "github.com/databricks/cli/libs/log" ) -const gitHubAPI = "https://api.github.com" -const gitHubUserContent = "https://raw.githubusercontent.com" +const ( + gitHubAPI = "https://api.github.com" + gitHubUserContent = "https://raw.githubusercontent.com" +) // Placeholders to use as unique keys in context.Context. 
-var apiOverride int -var userContentOverride int +var ( + apiOverride int + userContentOverride int +) func WithApiOverride(ctx context.Context, override string) context.Context { return context.WithValue(ctx, &apiOverride, override) diff --git a/cmd/labs/github/ref_test.go b/cmd/labs/github/ref_test.go index 2a9ffcc5b..9668cd7ec 100644 --- a/cmd/labs/github/ref_test.go +++ b/cmd/labs/github/ref_test.go @@ -12,7 +12,10 @@ import ( func TestFileFromRef(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/databrickslabs/ucx/main/README.md" { - w.Write([]byte(`abc`)) + _, err := w.Write([]byte(`abc`)) + if !assert.NoError(t, err) { + return + } return } t.Logf("Requested: %s", r.URL.Path) @@ -31,7 +34,10 @@ func TestFileFromRef(t *testing.T) { func TestDownloadZipball(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/repos/databrickslabs/ucx/zipball/main" { - w.Write([]byte(`abc`)) + _, err := w.Write([]byte(`abc`)) + if !assert.NoError(t, err) { + return + } return } t.Logf("Requested: %s", r.URL.Path) diff --git a/cmd/labs/github/releases_test.go b/cmd/labs/github/releases_test.go index ea24a1e2e..93ac33aee 100644 --- a/cmd/labs/github/releases_test.go +++ b/cmd/labs/github/releases_test.go @@ -12,7 +12,10 @@ import ( func TestLoadsReleasesForCLI(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/repos/databricks/cli/releases" { - w.Write([]byte(`[{"tag_name": "v1.2.3"}, {"tag_name": "v1.2.2"}]`)) + _, err := w.Write([]byte(`[{"tag_name": "v1.2.3"}, {"tag_name": "v1.2.2"}]`)) + if !assert.NoError(t, err) { + return + } return } t.Logf("Requested: %s", r.URL.Path) diff --git a/cmd/labs/github/repositories.go b/cmd/labs/github/repositories.go index 850cdb1cb..afdf7aeb2 100644 --- a/cmd/labs/github/repositories.go +++ b/cmd/labs/github/repositories.go @@ -12,7 +12,7 @@ import ( const repositoryCacheTTL = 24 * time.Hour func NewRepositoryCache(org, cacheDir string) *repositoryCache { - filename := fmt.Sprintf("%s-repositories", org) + filename := org + "-repositories" return &repositoryCache{ cache: localcache.NewLocalCache[Repositories](cacheDir, filename, repositoryCacheTTL), Org: org, diff --git a/cmd/labs/github/repositories_test.go b/cmd/labs/github/repositories_test.go index 4f2fef3e1..29ec2ce03 100644 --- a/cmd/labs/github/repositories_test.go +++ b/cmd/labs/github/repositories_test.go @@ -12,7 +12,8 @@ import ( func TestRepositories(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/users/databrickslabs/repos" { - w.Write([]byte(`[{"name": "x"}]`)) + _, err := w.Write([]byte(`[{"name": "x"}]`)) + assert.NoError(t, err) return } t.Logf("Requested: %s", r.URL.Path) @@ -26,5 +27,5 @@ func TestRepositories(t *testing.T) { r := NewRepositoryCache("databrickslabs", t.TempDir()) all, err := r.Load(ctx) assert.NoError(t, err) - assert.True(t, len(all) > 0) + assert.NotEmpty(t, all) } diff --git a/cmd/labs/installed.go b/cmd/labs/installed.go index e4249c9ff..9982cc1f0 100644 --- a/cmd/labs/installed.go +++ b/cmd/labs/installed.go @@ -1,6 +1,7 @@ package labs import ( + "errors" "fmt" "github.com/databricks/cli/cmd/labs/project" @@ -49,7 +50,7 @@ func newInstalledCommand() *cobra.Command { }) } if len(info.Projects) == 0 { - return fmt.Errorf("no projects installed") + return errors.New("no 
projects installed") } return cmdio.Render(ctx, info) }, diff --git a/cmd/labs/installed_test.go b/cmd/labs/installed_test.go index 00692f796..3c38e5e11 100644 --- a/cmd/labs/installed_test.go +++ b/cmd/labs/installed_test.go @@ -4,14 +4,14 @@ import ( "context" "testing" - "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/env" ) func TestListsInstalledProjects(t *testing.T) { ctx := context.Background() ctx = env.WithUserHomeDir(ctx, "project/testdata/installed-in-home") - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "installed") + r := testcli.NewRunner(t, ctx, "labs", "installed") r.RunAndExpectOutput(` Name Description Version blueprint Blueprint Project v0.3.15 diff --git a/cmd/labs/list_test.go b/cmd/labs/list_test.go index 925b984ab..4388fdd0e 100644 --- a/cmd/labs/list_test.go +++ b/cmd/labs/list_test.go @@ -4,7 +4,7 @@ import ( "context" "testing" - "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/env" "github.com/stretchr/testify/require" ) @@ -12,7 +12,7 @@ import ( func TestListingWorks(t *testing.T) { ctx := context.Background() ctx = env.WithUserHomeDir(ctx, "project/testdata/installed-in-home") - c := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "list") + c := testcli.NewRunner(t, ctx, "labs", "list") stdout, _, err := c.Run() require.NoError(t, err) require.Contains(t, stdout.String(), "ucx") diff --git a/cmd/labs/localcache/jsonfile.go b/cmd/labs/localcache/jsonfile.go index 495743a57..50ed372f5 100644 --- a/cmd/labs/localcache/jsonfile.go +++ b/cmd/labs/localcache/jsonfile.go @@ -14,8 +14,10 @@ import ( "github.com/databricks/cli/libs/log" ) -const userRW = 0o600 -const ownerRWXworldRX = 0o755 +const ( + userRW = 0o600 + ownerRWXworldRX = 0o755 +) func NewLocalCache[T any](dir, name string, validity time.Duration) LocalCache[T] { return LocalCache[T]{ @@ -91,7 +93,7 @@ func (r *LocalCache[T]) writeCache(ctx context.Context, data T) (T, error) { } func (r *LocalCache[T]) FileName() string { - return filepath.Join(r.dir, fmt.Sprintf("%s.json", r.name)) + return filepath.Join(r.dir, r.name+".json") } func (r *LocalCache[T]) loadCache() (*cached[T], error) { diff --git a/cmd/labs/localcache/jsonfile_test.go b/cmd/labs/localcache/jsonfile_test.go index 0d852174c..8172b7d14 100644 --- a/cmd/labs/localcache/jsonfile_test.go +++ b/cmd/labs/localcache/jsonfile_test.go @@ -3,7 +3,6 @@ package localcache import ( "context" "errors" - "fmt" "net/url" "runtime" "testing" @@ -22,7 +21,7 @@ func TestCreatesDirectoryIfNeeded(t *testing.T) { } first, err := c.Load(ctx, tick) assert.NoError(t, err) - assert.Equal(t, first, int64(1)) + assert.Equal(t, int64(1), first) } func TestImpossibleToCreateDir(t *testing.T) { @@ -115,7 +114,7 @@ func TestFolderDisappears(t *testing.T) { func TestRefreshFails(t *testing.T) { c := NewLocalCache[int64](t.TempDir(), "time", 1*time.Minute) tick := func() (int64, error) { - return 0, fmt.Errorf("nope") + return 0, errors.New("nope") } ctx := context.Background() _, err := c.Load(ctx, tick) diff --git a/cmd/labs/project/command_test.go b/cmd/labs/project/command_test.go index 20021879f..453329e1d 100644 --- a/cmd/labs/project/command_test.go +++ b/cmd/labs/project/command_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/python" 
"github.com/databricks/databricks-sdk-go" @@ -30,7 +30,7 @@ func devEnvContext(t *testing.T) context.Context { func TestRunningBlueprintEcho(t *testing.T) { ctx := devEnvContext(t) - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "blueprint", "echo") + r := testcli.NewRunner(t, ctx, "labs", "blueprint", "echo") var out echoOut r.RunAndParseJSON(&out) assert.Equal(t, "echo", out.Command) @@ -41,14 +41,14 @@ func TestRunningBlueprintEcho(t *testing.T) { func TestRunningBlueprintEchoProfileWrongOverride(t *testing.T) { ctx := devEnvContext(t) - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "blueprint", "echo", "--profile", "workspace-profile") + r := testcli.NewRunner(t, ctx, "labs", "blueprint", "echo", "--profile", "workspace-profile") _, _, err := r.Run() assert.ErrorIs(t, err, databricks.ErrNotAccountClient) } func TestRunningCommand(t *testing.T) { ctx := devEnvContext(t) - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "blueprint", "foo") + r := testcli.NewRunner(t, ctx, "labs", "blueprint", "foo") r.WithStdin() defer r.CloseStdin() @@ -60,7 +60,7 @@ func TestRunningCommand(t *testing.T) { func TestRenderingTable(t *testing.T) { ctx := devEnvContext(t) - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "blueprint", "table") + r := testcli.NewRunner(t, ctx, "labs", "blueprint", "table") r.RunAndExpectOutput(` Key Value First Second diff --git a/cmd/labs/project/entrypoint.go b/cmd/labs/project/entrypoint.go index 99edf83c8..2bed49145 100644 --- a/cmd/labs/project/entrypoint.go +++ b/cmd/labs/project/entrypoint.go @@ -30,10 +30,12 @@ type Entrypoint struct { IsBundleAware bool `yaml:"is_bundle_aware,omitempty"` } -var ErrNoLoginConfig = errors.New("no login configuration found") -var ErrMissingClusterID = errors.New("missing a cluster compatible with Databricks Connect") -var ErrMissingWarehouseID = errors.New("missing a SQL warehouse") -var ErrNotInTTY = errors.New("not in an interactive terminal") +var ( + ErrNoLoginConfig = errors.New("no login configuration found") + ErrMissingClusterID = errors.New("missing a cluster compatible with Databricks Connect") + ErrMissingWarehouseID = errors.New("missing a SQL warehouse") + ErrNotInTTY = errors.New("not in an interactive terminal") +) func (e *Entrypoint) NeedsCluster() bool { if e.Installer == nil { @@ -190,9 +192,6 @@ func (e *Entrypoint) getLoginConfig(cmd *cobra.Command) (*loginConfig, *config.C if isNoLoginConfig && !e.IsBundleAware { return nil, nil, ErrNoLoginConfig } - if !isNoLoginConfig && err != nil { - return nil, nil, fmt.Errorf("load: %w", err) - } if e.IsAccountLevel { log.Debugf(ctx, "Using account-level login profile: %s", lc.AccountProfile) cfg, err := e.envAwareConfigWithProfile(ctx, lc.AccountProfile) diff --git a/cmd/labs/project/installer.go b/cmd/labs/project/installer.go index 041415964..7d31623bb 100644 --- a/cmd/labs/project/installer.go +++ b/cmd/labs/project/installer.go @@ -175,7 +175,7 @@ func (i *installer) login(ctx context.Context) (*databricks.WorkspaceClient, err return nil, fmt.Errorf("valid: %w", err) } if !i.HasAccountLevelCommands() && cfg.IsAccountClient() { - return nil, fmt.Errorf("got account-level client, but no account-level commands") + return nil, errors.New("got account-level client, but no account-level commands") } lc := &loginConfig{Entrypoint: i.Installer.Entrypoint} w, err := lc.askWorkspace(ctx, cfg) @@ -200,10 +200,10 @@ func (i *installer) downloadLibrary(ctx context.Context) error { libTarget := i.LibDir() // we may support wheels, 
jars, and golang binaries. but those are not zipballs if i.IsZipball() { - feedback <- fmt.Sprintf("Downloading and unpacking zipball for %s", i.version) + feedback <- "Downloading and unpacking zipball for " + i.version return i.downloadAndUnpackZipball(ctx, libTarget) } - return fmt.Errorf("we only support zipballs for now") + return errors.New("we only support zipballs for now") } func (i *installer) downloadAndUnpackZipball(ctx context.Context, libTarget string) error { @@ -234,7 +234,7 @@ func (i *installer) setupPythonVirtualEnvironment(ctx context.Context, w *databr log.Debugf(ctx, "Detected Python %s at: %s", py.Version, py.Path) venvPath := i.virtualEnvPath(ctx) log.Debugf(ctx, "Creating Python Virtual Environment at: %s", venvPath) - feedback <- fmt.Sprintf("Creating Virtual Environment with Python %s", py.Version) + feedback <- "Creating Virtual Environment with Python " + py.Version _, err = process.Background(ctx, []string{py.Path, "-m", "venv", venvPath}) if err != nil { return fmt.Errorf("create venv: %w", err) @@ -251,8 +251,8 @@ func (i *installer) setupPythonVirtualEnvironment(ctx context.Context, w *databr if !ok { return fmt.Errorf("unsupported runtime: %s", cluster.SparkVersion) } - feedback <- fmt.Sprintf("Installing Databricks Connect v%s", runtimeVersion) - pipSpec := fmt.Sprintf("databricks-connect==%s", runtimeVersion) + feedback <- "Installing Databricks Connect v" + runtimeVersion + pipSpec := "databricks-connect==" + runtimeVersion err = i.installPythonDependencies(ctx, pipSpec) if err != nil { return fmt.Errorf("dbconnect: %w", err) diff --git a/cmd/labs/project/installer_test.go b/cmd/labs/project/installer_test.go index 1e45fafe6..a01ba864a 100644 --- a/cmd/labs/project/installer_test.go +++ b/cmd/labs/project/installer_test.go @@ -19,18 +19,21 @@ import ( "github.com/databricks/cli/cmd/labs/github" "github.com/databricks/cli/cmd/labs/project" - "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/process" "github.com/databricks/cli/libs/python" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/iam" "github.com/databricks/databricks-sdk-go/service/sql" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -const ownerRWXworldRX = 0o755 -const ownerRW = 0o600 +const ( + ownerRWXworldRX = 0o755 + ownerRW = 0o600 +) func zipballFromFolder(src string) ([]byte, error) { var buf bytes.Buffer @@ -117,10 +120,10 @@ func installerContext(t *testing.T, server *httptest.Server) context.Context { func respondWithJSON(t *testing.T, w http.ResponseWriter, v any) { raw, err := json.Marshal(v) - if err != nil { - require.NoError(t, err) - } - w.Write(raw) + require.NoError(t, err) + + _, err = w.Write(raw) + require.NoError(t, err) } type fileTree struct { @@ -167,19 +170,17 @@ func TestInstallerWorksForReleases(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/databrickslabs/blueprint/v0.3.15/labs.yml" { raw, err := os.ReadFile("testdata/installed-in-home/.databricks/labs/blueprint/lib/labs.yml") - if err != nil { - panic(err) - } - w.Write(raw) + assert.NoError(t, err) + _, err = w.Write(raw) + assert.NoError(t, err) return } if r.URL.Path == "/repos/databrickslabs/blueprint/zipball/v0.3.15" { raw, err := zipballFromFolder("testdata/installed-in-home/.databricks/labs/blueprint/lib") - if err != nil { - panic(err) - } 
+ assert.NoError(t, err) w.Header().Add("Content-Type", "application/octet-stream") - w.Write(raw) + _, err = w.Write(raw) + assert.NoError(t, err) return } if r.URL.Path == "/api/2.1/clusters/get" { @@ -236,7 +237,7 @@ func TestInstallerWorksForReleases(t *testing.T) { // │ │ │ └── site-packages // │ │ │ ├── ... // │ │ │ ├── distutils-precedence.pth - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "install", "blueprint", "--debug") + r := testcli.NewRunner(t, ctx, "labs", "install", "blueprint", "--debug") r.RunAndExpectOutput("setting up important infrastructure") } @@ -314,7 +315,10 @@ func TestInstallerWorksForDevelopment(t *testing.T) { defer server.Close() wd, _ := os.Getwd() - defer os.Chdir(wd) + defer func() { + err := os.Chdir(wd) + require.NoError(t, err) + }() devDir := copyTestdata(t, "testdata/installed-in-home/.databricks/labs/blueprint/lib") err := os.Chdir(devDir) @@ -353,7 +357,7 @@ account_id = abc // └── databrickslabs-blueprint-releases.json // `databricks labs install .` means "verify this installer i'm developing does work" - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "install", ".") + r := testcli.NewRunner(t, ctx, "labs", "install", ".") r.WithStdin() defer r.CloseStdin() @@ -373,19 +377,17 @@ func TestUpgraderWorksForReleases(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/databrickslabs/blueprint/v0.4.0/labs.yml" { raw, err := os.ReadFile("testdata/installed-in-home/.databricks/labs/blueprint/lib/labs.yml") - if err != nil { - panic(err) - } - w.Write(raw) + assert.NoError(t, err) + _, err = w.Write(raw) + assert.NoError(t, err) return } if r.URL.Path == "/repos/databrickslabs/blueprint/zipball/v0.4.0" { raw, err := zipballFromFolder("testdata/installed-in-home/.databricks/labs/blueprint/lib") - if err != nil { - panic(err) - } + assert.NoError(t, err) w.Header().Add("Content-Type", "application/octet-stream") - w.Write(raw) + _, err = w.Write(raw) + assert.NoError(t, err) return } if r.URL.Path == "/api/2.1/clusters/get" { @@ -425,7 +427,7 @@ func TestUpgraderWorksForReleases(t *testing.T) { ctx = env.Set(ctx, "DATABRICKS_CLUSTER_ID", "installer-cluster") ctx = env.Set(ctx, "DATABRICKS_WAREHOUSE_ID", "installer-warehouse") - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "upgrade", "blueprint") + r := testcli.NewRunner(t, ctx, "labs", "upgrade", "blueprint") r.RunAndExpectOutput("setting up important infrastructure") // Check if the stub was called with the 'python -m pip install' command diff --git a/cmd/labs/show.go b/cmd/labs/show.go index c36f0bda3..e8c876d8b 100644 --- a/cmd/labs/show.go +++ b/cmd/labs/show.go @@ -1,7 +1,7 @@ package labs import ( - "fmt" + "errors" "github.com/databricks/cli/cmd/labs/project" "github.com/databricks/cli/cmd/root" @@ -34,7 +34,7 @@ func newShowCommand() *cobra.Command { return err } if len(installed) == 0 { - return fmt.Errorf("no projects found") + return errors.New("no projects found") } name := args[0] for _, v := range installed { diff --git a/cmd/root/auth.go b/cmd/root/auth.go index 107679105..49abfd414 100644 --- a/cmd/root/auth.go +++ b/cmd/root/auth.go @@ -15,16 +15,18 @@ import ( ) // Placeholders to use as unique keys in context.Context. 
-var workspaceClient int -var accountClient int -var configUsed int +var ( + workspaceClient int + accountClient int + configUsed int +) type ErrNoWorkspaceProfiles struct { path string } func (e ErrNoWorkspaceProfiles) Error() string { - return fmt.Sprintf("%s does not contain workspace profiles; please create one by running 'databricks configure'", e.path) + return e.path + " does not contain workspace profiles; please create one by running 'databricks configure'" } type ErrNoAccountProfiles struct { @@ -32,7 +34,7 @@ type ErrNoAccountProfiles struct { } func (e ErrNoAccountProfiles) Error() string { - return fmt.Sprintf("%s does not contain account profiles", e.path) + return e.path + " does not contain account profiles" } func initProfileFlag(cmd *cobra.Command) { @@ -251,7 +253,7 @@ func AskForWorkspaceProfile(ctx context.Context) (string, error) { return profiles[0].Name, nil } i, _, err := cmdio.RunSelect(ctx, &promptui.Select{ - Label: fmt.Sprintf("Workspace profiles defined in %s", path), + Label: "Workspace profiles defined in " + path, Items: profiles, Searcher: profiles.SearchCaseInsensitive, StartInSearchMode: true, @@ -285,7 +287,7 @@ func AskForAccountProfile(ctx context.Context) (string, error) { return profiles[0].Name, nil } i, _, err := cmdio.RunSelect(ctx, &promptui.Select{ - Label: fmt.Sprintf("Account profiles defined in %s", path), + Label: "Account profiles defined in " + path, Items: profiles, Searcher: profiles.SearchCaseInsensitive, StartInSearchMode: true, diff --git a/cmd/root/auth_test.go b/cmd/root/auth_test.go index 9ba2a8fa9..784598796 100644 --- a/cmd/root/auth_test.go +++ b/cmd/root/auth_test.go @@ -15,7 +15,8 @@ import ( ) func TestEmptyHttpRequest(t *testing.T) { - ctx, _ := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() req := emptyHttpRequest(ctx) assert.Equal(t, req.Context(), ctx) } @@ -83,7 +84,7 @@ func TestAccountClientOrPrompt(t *testing.T) { account_id = 1112 token = foobar `), - 0755) + 0o755) require.NoError(t, err) t.Setenv("DATABRICKS_CONFIG_FILE", configFile) t.Setenv("PATH", "/nothing") @@ -149,7 +150,7 @@ func TestWorkspaceClientOrPrompt(t *testing.T) { host = https://adb-1112.12.azuredatabricks.net/ token = foobar `), - 0755) + 0o755) require.NoError(t, err) t.Setenv("DATABRICKS_CONFIG_FILE", configFile) t.Setenv("PATH", "/nothing") @@ -203,7 +204,7 @@ func TestMustAccountClientWorksWithDatabricksCfg(t *testing.T) { account_id = 1111 token = foobar `), - 0755) + 0o755) require.NoError(t, err) cmd := New(context.Background()) @@ -250,7 +251,7 @@ func TestMustAnyClientCanCreateWorkspaceClient(t *testing.T) { host = https://adb-1111.11.azuredatabricks.net/ token = foobar `), - 0755) + 0o755) require.NoError(t, err) ctx, tt := cmdio.SetupTest(context.Background()) @@ -279,7 +280,7 @@ func TestMustAnyClientCanCreateAccountClient(t *testing.T) { account_id = 1111 token = foobar `), - 0755) + 0o755) require.NoError(t, err) ctx, tt := cmdio.SetupTest(context.Background()) @@ -303,7 +304,7 @@ func TestMustAnyClientWithEmptyDatabricksCfg(t *testing.T) { err := os.WriteFile( configFile, []byte(""), // empty file - 0755) + 0o755) require.NoError(t, err) ctx, tt := cmdio.SetupTest(context.Background()) diff --git a/cmd/root/bundle_test.go b/cmd/root/bundle_test.go index 301884287..1998b19e6 100644 --- a/cmd/root/bundle_test.go +++ b/cmd/root/bundle_test.go @@ -23,7 +23,7 @@ func setupDatabricksCfg(t *testing.T) { } cfg := []byte("[PROFILE-1]\nhost = https://a.com\ntoken = 
a\n[PROFILE-2]\nhost = https://a.com\ntoken = b\n") - err := os.WriteFile(filepath.Join(tempHomeDir, ".databrickscfg"), cfg, 0644) + err := os.WriteFile(filepath.Join(tempHomeDir, ".databrickscfg"), cfg, 0o644) assert.NoError(t, err) t.Setenv("DATABRICKS_CONFIG_FILE", "") @@ -48,7 +48,7 @@ func setupWithHost(t *testing.T, cmd *cobra.Command, host string) *bundle.Bundle workspace: host: %q `, host) - err := os.WriteFile(filepath.Join(rootPath, "databricks.yml"), []byte(contents), 0644) + err := os.WriteFile(filepath.Join(rootPath, "databricks.yml"), []byte(contents), 0o644) require.NoError(t, err) b, diags := MustConfigureBundle(cmd) @@ -66,7 +66,7 @@ func setupWithProfile(t *testing.T, cmd *cobra.Command, profile string) *bundle. workspace: profile: %q `, profile) - err := os.WriteFile(filepath.Join(rootPath, "databricks.yml"), []byte(contents), 0644) + err := os.WriteFile(filepath.Join(rootPath, "databricks.yml"), []byte(contents), 0o644) require.NoError(t, err) b, diags := MustConfigureBundle(cmd) @@ -99,10 +99,11 @@ func TestBundleConfigureWithNonExistentProfileFlag(t *testing.T) { testutil.CleanupEnvironment(t) cmd := emptyCommand(t) - cmd.Flag("profile").Value.Set("NOEXIST") + err := cmd.Flag("profile").Value.Set("NOEXIST") + require.NoError(t, err) b := setupWithHost(t, cmd, "https://x.com") - _, err := b.InitializeWorkspaceClient() + _, err = b.InitializeWorkspaceClient() assert.ErrorContains(t, err, "has no NOEXIST profile configured") } @@ -110,10 +111,11 @@ func TestBundleConfigureWithMismatchedProfile(t *testing.T) { testutil.CleanupEnvironment(t) cmd := emptyCommand(t) - cmd.Flag("profile").Value.Set("PROFILE-1") + err := cmd.Flag("profile").Value.Set("PROFILE-1") + require.NoError(t, err) b := setupWithHost(t, cmd, "https://x.com") - _, err := b.InitializeWorkspaceClient() + _, err = b.InitializeWorkspaceClient() assert.ErrorContains(t, err, "config host mismatch: profile uses host https://a.com, but CLI configured to use https://x.com") } @@ -121,7 +123,8 @@ func TestBundleConfigureWithCorrectProfile(t *testing.T) { testutil.CleanupEnvironment(t) cmd := emptyCommand(t) - cmd.Flag("profile").Value.Set("PROFILE-1") + err := cmd.Flag("profile").Value.Set("PROFILE-1") + require.NoError(t, err) b := setupWithHost(t, cmd, "https://a.com") client, err := b.InitializeWorkspaceClient() @@ -146,7 +149,8 @@ func TestBundleConfigureWithProfileFlagAndEnvVariable(t *testing.T) { t.Setenv("DATABRICKS_CONFIG_PROFILE", "NOEXIST") cmd := emptyCommand(t) - cmd.Flag("profile").Value.Set("PROFILE-1") + err := cmd.Flag("profile").Value.Set("PROFILE-1") + require.NoError(t, err) b := setupWithHost(t, cmd, "https://a.com") client, err := b.InitializeWorkspaceClient() @@ -174,7 +178,8 @@ func TestBundleConfigureProfileFlag(t *testing.T) { // The --profile flag takes precedence over the profile in the databricks.yml file cmd := emptyCommand(t) - cmd.Flag("profile").Value.Set("PROFILE-2") + err := cmd.Flag("profile").Value.Set("PROFILE-2") + require.NoError(t, err) b := setupWithProfile(t, cmd, "PROFILE-1") client, err := b.InitializeWorkspaceClient() @@ -205,7 +210,8 @@ func TestBundleConfigureProfileFlagAndEnvVariable(t *testing.T) { // The --profile flag takes precedence over the DATABRICKS_CONFIG_PROFILE environment variable t.Setenv("DATABRICKS_CONFIG_PROFILE", "NOEXIST") cmd := emptyCommand(t) - cmd.Flag("profile").Value.Set("PROFILE-2") + err := cmd.Flag("profile").Value.Set("PROFILE-2") + require.NoError(t, err) b := setupWithProfile(t, cmd, "PROFILE-1") client, err := 
b.InitializeWorkspaceClient() diff --git a/cmd/root/io.go b/cmd/root/io.go index b224bbb27..bba989a79 100644 --- a/cmd/root/io.go +++ b/cmd/root/io.go @@ -21,7 +21,7 @@ func initOutputFlag(cmd *cobra.Command) *outputFlag { // Configure defaults from environment, if applicable. // If the provided value is invalid it is ignored. if v, ok := env.Lookup(cmd.Context(), envOutputFormat); ok { - f.output.Set(v) + f.output.Set(v) //nolint:errcheck } cmd.PersistentFlags().VarP(&f.output, "output", "o", "output type: text or json") @@ -45,8 +45,9 @@ func (f *outputFlag) initializeIO(cmd *cobra.Command) error { headerTemplate = cmd.Annotations["headerTemplate"] } - cmdIO := cmdio.NewIO(f.output, cmd.InOrStdin(), cmd.OutOrStdout(), cmd.ErrOrStderr(), headerTemplate, template) - ctx := cmdio.InContext(cmd.Context(), cmdIO) + ctx := cmd.Context() + cmdIO := cmdio.NewIO(ctx, f.output, cmd.InOrStdin(), cmd.OutOrStdout(), cmd.ErrOrStderr(), headerTemplate, template) + ctx = cmdio.InContext(ctx, cmdIO) cmd.SetContext(ctx) return nil } diff --git a/cmd/root/logger.go b/cmd/root/logger.go index 48cb99a37..38e09b9c9 100644 --- a/cmd/root/logger.go +++ b/cmd/root/logger.go @@ -45,7 +45,10 @@ func (f *logFlags) makeLogHandler(opts slog.HandlerOptions) (slog.Handler, error func (f *logFlags) initializeContext(ctx context.Context) (context.Context, error) { if f.debug { - f.level.Set("debug") + err := f.level.Set("debug") + if err != nil { + return nil, err + } } opts := slog.HandlerOptions{} @@ -81,13 +84,13 @@ func initLogFlags(cmd *cobra.Command) *logFlags { // Configure defaults from environment, if applicable. // If the provided value is invalid it is ignored. if v, ok := env.Lookup(cmd.Context(), envLogFile); ok { - f.file.Set(v) + f.file.Set(v) //nolint:errcheck } if v, ok := env.Lookup(cmd.Context(), envLogLevel); ok { - f.level.Set(v) + f.level.Set(v) //nolint:errcheck } if v, ok := env.Lookup(cmd.Context(), envLogFormat); ok { - f.output.Set(v) + f.output.Set(v) //nolint:errcheck } flags := cmd.PersistentFlags() diff --git a/cmd/root/progress_logger.go b/cmd/root/progress_logger.go index 7d6a1fa46..0cc49b2ac 100644 --- a/cmd/root/progress_logger.go +++ b/cmd/root/progress_logger.go @@ -2,7 +2,7 @@ package root import ( "context" - "fmt" + "errors" "os" "github.com/databricks/cli/libs/cmdio" @@ -37,7 +37,7 @@ func (f *progressLoggerFlag) initializeContext(ctx context.Context) (context.Con if f.log.level.String() != "disabled" && f.log.file.String() == "stderr" && f.ProgressLogFormat == flags.ModeInplace { - return nil, fmt.Errorf("inplace progress logging cannot be used when log-file is stderr") + return nil, errors.New("inplace progress logging cannot be used when log-file is stderr") } format := f.ProgressLogFormat @@ -59,7 +59,7 @@ func initProgressLoggerFlag(cmd *cobra.Command, logFlags *logFlags) *progressLog // Configure defaults from environment, if applicable. // If the provided value is invalid it is ignored. 
if v, ok := env.Lookup(cmd.Context(), envProgressFormat); ok { - f.Set(v) + _ = f.Set(v) } flags := cmd.PersistentFlags() diff --git a/cmd/root/progress_logger_test.go b/cmd/root/progress_logger_test.go index 9dceee8d5..42ba1bdc6 100644 --- a/cmd/root/progress_logger_test.go +++ b/cmd/root/progress_logger_test.go @@ -33,27 +33,27 @@ func initializeProgressLoggerTest(t *testing.T) ( func TestInitializeErrorOnIncompatibleConfig(t *testing.T) { plt, logLevel, logFile, progressFormat := initializeProgressLoggerTest(t) - logLevel.Set("info") - logFile.Set("stderr") - progressFormat.Set("inplace") + require.NoError(t, logLevel.Set("info")) + require.NoError(t, logFile.Set("stderr")) + require.NoError(t, progressFormat.Set("inplace")) _, err := plt.progressLoggerFlag.initializeContext(context.Background()) assert.ErrorContains(t, err, "inplace progress logging cannot be used when log-file is stderr") } func TestNoErrorOnDisabledLogLevel(t *testing.T) { plt, logLevel, logFile, progressFormat := initializeProgressLoggerTest(t) - logLevel.Set("disabled") - logFile.Set("stderr") - progressFormat.Set("inplace") + require.NoError(t, logLevel.Set("disabled")) + require.NoError(t, logFile.Set("stderr")) + require.NoError(t, progressFormat.Set("inplace")) _, err := plt.progressLoggerFlag.initializeContext(context.Background()) assert.NoError(t, err) } func TestNoErrorOnNonStderrLogFile(t *testing.T) { plt, logLevel, logFile, progressFormat := initializeProgressLoggerTest(t) - logLevel.Set("info") - logFile.Set("stdout") - progressFormat.Set("inplace") + require.NoError(t, logLevel.Set("info")) + require.NoError(t, logFile.Set("stdout")) + require.NoError(t, progressFormat.Set("inplace")) _, err := plt.progressLoggerFlag.initializeContext(context.Background()) assert.NoError(t, err) } diff --git a/cmd/root/root.go b/cmd/root/root.go index e6f66f126..3b37d0176 100644 --- a/cmd/root/root.go +++ b/cmd/root/root.go @@ -4,11 +4,10 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "strings" - "log/slog" - "github.com/databricks/cli/internal/build" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/dbr" diff --git a/cmd/root/user_agent_upstream.go b/cmd/root/user_agent_upstream.go index f580b4263..a813e8ee7 100644 --- a/cmd/root/user_agent_upstream.go +++ b/cmd/root/user_agent_upstream.go @@ -8,12 +8,16 @@ import ( ) // Environment variables that caller can set to convey what is upstream to this CLI. -const upstreamEnvVar = "DATABRICKS_CLI_UPSTREAM" -const upstreamVersionEnvVar = "DATABRICKS_CLI_UPSTREAM_VERSION" +const ( + upstreamEnvVar = "DATABRICKS_CLI_UPSTREAM" + upstreamVersionEnvVar = "DATABRICKS_CLI_UPSTREAM_VERSION" +) // Keys in the user agent. 
-const upstreamKey = "upstream" -const upstreamVersionKey = "upstream-version" +const ( + upstreamKey = "upstream" + upstreamVersionKey = "upstream-version" +) func withUpstreamInUserAgent(ctx context.Context) context.Context { value := env.Get(ctx, upstreamEnvVar) diff --git a/cmd/sync/completion.go b/cmd/sync/completion.go index 422147713..5a65dd528 100644 --- a/cmd/sync/completion.go +++ b/cmd/sync/completion.go @@ -2,7 +2,6 @@ package sync import ( "context" - "fmt" "path" "strings" @@ -52,8 +51,8 @@ func completeRemotePath( } prefixes := []string{ - path.Clean(fmt.Sprintf("/Users/%s", me.UserName)) + "/", - path.Clean(fmt.Sprintf("/Repos/%s", me.UserName)) + "/", + path.Clean("/Users/"+me.UserName) + "/", + path.Clean("/Repos/"+me.UserName) + "/", } validPrefix := false diff --git a/cmd/sync/sync.go b/cmd/sync/sync.go index 6d722fb08..dea40f96a 100644 --- a/cmd/sync/sync.go +++ b/cmd/sync/sync.go @@ -2,6 +2,7 @@ package sync import ( "context" + "errors" "flag" "fmt" "io" @@ -29,7 +30,7 @@ type syncFlags struct { func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, args []string, b *bundle.Bundle) (*sync.SyncOptions, error) { if len(args) > 0 { - return nil, fmt.Errorf("SRC and DST are not configurable in the context of a bundle") + return nil, errors.New("SRC and DST are not configurable in the context of a bundle") } opts, err := files.GetSyncOptions(cmd.Context(), bundle.ReadOnly(b)) @@ -68,7 +69,6 @@ func (f *syncFlags) syncOptionsFromArgs(cmd *cobra.Command, args []string) (*syn localRoot := vfs.MustNew(args[0]) info, err := git.FetchRepositoryInfo(ctx, localRoot.Native(), client) - if err != nil { log.Warnf(ctx, "Failed to read git info: %s", err) } diff --git a/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go b/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go index b1adf6103..3f905e521 100755 --- a/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go +++ b/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go @@ -26,6 +26,7 @@ func New() *cobra.Command { } // Add methods + cmd.AddCommand(newDelete()) cmd.AddCommand(newGet()) cmd.AddCommand(newUpdate()) @@ -37,6 +38,62 @@ func New() *cobra.Command { return cmd } +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *settings.DeleteAibiDashboardEmbeddingAccessPolicySettingRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq settings.DeleteAibiDashboardEmbeddingAccessPolicySettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteReq.Etag, "etag", deleteReq.Etag, `etag used for versioning.`) + + cmd.Use = "delete" + cmd.Short = `Delete the AI/BI dashboard embedding access policy.` + cmd.Long = `Delete the AI/BI dashboard embedding access policy. 
+ + Delete the AI/BI dashboard embedding access policy, reverting back to the + default.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.AibiDashboardEmbeddingAccessPolicy().Delete(ctx, deleteReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + // start get command // Slice with functions to override default command behavior. diff --git a/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go b/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go index 481197460..69db66504 100755 --- a/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go +++ b/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go @@ -26,6 +26,7 @@ func New() *cobra.Command { } // Add methods + cmd.AddCommand(newDelete()) cmd.AddCommand(newGet()) cmd.AddCommand(newUpdate()) @@ -37,6 +38,62 @@ func New() *cobra.Command { return cmd } +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *settings.DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq settings.DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteReq.Etag, "etag", deleteReq.Etag, `etag used for versioning.`) + + cmd.Use = "delete" + cmd.Short = `Delete AI/BI dashboard embedding approved domains.` + cmd.Long = `Delete AI/BI dashboard embedding approved domains. + + Delete the list of domains approved to host embedded AI/BI dashboards, + reverting back to the default empty list.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.AibiDashboardEmbeddingApprovedDomains().Delete(ctx, deleteReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + // start get command // Slice with functions to override default command behavior. 
diff --git a/cmd/workspace/clean-room-assets/clean-room-assets.go b/cmd/workspace/clean-room-assets/clean-room-assets.go new file mode 100755 index 000000000..872f0ecef --- /dev/null +++ b/cmd/workspace/clean-room-assets/clean-room-assets.go @@ -0,0 +1,419 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package clean_room_assets + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/cleanrooms" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "clean-room-assets", + Short: `Clean room assets are data and code objects — Tables, volumes, and notebooks that are shared with the clean room.`, + Long: `Clean room assets are data and code objects — Tables, volumes, and notebooks + that are shared with the clean room.`, + GroupID: "cleanrooms", + Annotations: map[string]string{ + "package": "cleanrooms", + }, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *cleanrooms.CreateCleanRoomAssetRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq cleanrooms.CreateCleanRoomAssetRequest + createReq.Asset = &cleanrooms.CleanRoomAsset{} + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().Var(&createReq.Asset.AssetType, "asset-type", `The type of the asset. Supported values: [FOREIGN_TABLE, NOTEBOOK_FILE, TABLE, VIEW, VOLUME]`) + // TODO: complex arg: foreign_table + // TODO: complex arg: foreign_table_local_details + cmd.Flags().StringVar(&createReq.Asset.Name, "name", createReq.Asset.Name, `A fully qualified name that uniquely identifies the asset within the clean room.`) + // TODO: complex arg: notebook + // TODO: complex arg: table + // TODO: complex arg: table_local_details + // TODO: complex arg: view + // TODO: complex arg: view_local_details + // TODO: complex arg: volume_local_details + + cmd.Use = "create CLEAN_ROOM_NAME" + cmd.Short = `Create an asset.` + cmd.Long = `Create an asset. + + Create a clean room asset —share an asset like a notebook or table into the + clean room. For each UC asset that is added through this method, the clean + room owner must also have enough privilege on the asset to consume it. The + privilege must be maintained indefinitely for the clean room to be able to + access the asset. Typically, you should use a group as the clean room owner. 
+ + Arguments: + CLEAN_ROOM_NAME: Name of the clean room.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createJson.Unmarshal(&createReq.Asset) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + createReq.CleanRoomName = args[0] + + response, err := w.CleanRoomAssets.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *cleanrooms.DeleteCleanRoomAssetRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq cleanrooms.DeleteCleanRoomAssetRequest + + // TODO: short flags + + cmd.Use = "delete CLEAN_ROOM_NAME ASSET_TYPE ASSET_FULL_NAME" + cmd.Short = `Delete an asset.` + cmd.Long = `Delete an asset. + + Delete a clean room asset - unshare/remove the asset from the clean room + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room. + ASSET_TYPE: The type of the asset. + ASSET_FULL_NAME: The fully qualified name of the asset, it is same as the name field in + CleanRoomAsset.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteReq.CleanRoomName = args[0] + _, err = fmt.Sscan(args[1], &deleteReq.AssetType) + if err != nil { + return fmt.Errorf("invalid ASSET_TYPE: %s", args[1]) + } + deleteReq.AssetFullName = args[2] + + err = w.CleanRoomAssets.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *cleanrooms.GetCleanRoomAssetRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq cleanrooms.GetCleanRoomAssetRequest + + // TODO: short flags + + cmd.Use = "get CLEAN_ROOM_NAME ASSET_TYPE ASSET_FULL_NAME" + cmd.Short = `Get an asset.` + cmd.Long = `Get an asset. + + Get the details of a clean room asset by its type and full name. 
+ + Arguments: + CLEAN_ROOM_NAME: Name of the clean room. + ASSET_TYPE: The type of the asset. + ASSET_FULL_NAME: The fully qualified name of the asset, it is same as the name field in + CleanRoomAsset.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getReq.CleanRoomName = args[0] + _, err = fmt.Sscan(args[1], &getReq.AssetType) + if err != nil { + return fmt.Errorf("invalid ASSET_TYPE: %s", args[1]) + } + getReq.AssetFullName = args[2] + + response, err := w.CleanRoomAssets.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *cleanrooms.ListCleanRoomAssetsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq cleanrooms.ListCleanRoomAssetsRequest + + // TODO: short flags + + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) + + cmd.Use = "list CLEAN_ROOM_NAME" + cmd.Short = `List assets.` + cmd.Long = `List assets. + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listReq.CleanRoomName = args[0] + + response := w.CleanRoomAssets.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *cleanrooms.UpdateCleanRoomAssetRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq cleanrooms.UpdateCleanRoomAssetRequest + updateReq.Asset = &cleanrooms.CleanRoomAsset{} + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().Var(&updateReq.Asset.AssetType, "asset-type", `The type of the asset. 
Supported values: [FOREIGN_TABLE, NOTEBOOK_FILE, TABLE, VIEW, VOLUME]`) + // TODO: complex arg: foreign_table + // TODO: complex arg: foreign_table_local_details + cmd.Flags().StringVar(&updateReq.Asset.Name, "name", updateReq.Asset.Name, `A fully qualified name that uniquely identifies the asset within the clean room.`) + // TODO: complex arg: notebook + // TODO: complex arg: table + // TODO: complex arg: table_local_details + // TODO: complex arg: view + // TODO: complex arg: view_local_details + // TODO: complex arg: volume_local_details + + cmd.Use = "update CLEAN_ROOM_NAME ASSET_TYPE NAME" + cmd.Short = `Update an asset.` + cmd.Long = `Update an asset. + + Update a clean room asset. For example, updating the content of a notebook; + changing the shared partitions of a table; etc. + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room. + ASSET_TYPE: The type of the asset. + NAME: A fully qualified name that uniquely identifies the asset within the clean + room. This is also the name displayed in the clean room UI. + + For UC securable assets (tables, volumes, etc.), the format is + *shared_catalog*.*shared_schema*.*asset_name* + + For notebooks, the name is the notebook file name.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq.Asset) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + updateReq.CleanRoomName = args[0] + _, err = fmt.Sscan(args[1], &updateReq.AssetType) + if err != nil { + return fmt.Errorf("invalid ASSET_TYPE: %s", args[1]) + } + updateReq.Name = args[2] + + response, err := w.CleanRoomAssets.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service CleanRoomAssets diff --git a/cmd/workspace/clean-room-task-runs/clean-room-task-runs.go b/cmd/workspace/clean-room-task-runs/clean-room-task-runs.go new file mode 100755 index 000000000..b41e380cc --- /dev/null +++ b/cmd/workspace/clean-room-task-runs/clean-room-task-runs.go @@ -0,0 +1,97 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package clean_room_task_runs + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/databricks-sdk-go/service/cleanrooms" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "clean-room-task-runs", + Short: `Clean room task runs are the executions of notebooks in a clean room.`, + Long: `Clean room task runs are the executions of notebooks in a clean room.`, + GroupID: "cleanrooms", + Annotations: map[string]string{ + "package": "cleanrooms", + }, + } + + // Add methods + cmd.AddCommand(newList()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *cleanrooms.ListCleanRoomNotebookTaskRunsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq cleanrooms.ListCleanRoomNotebookTaskRunsRequest + + // TODO: short flags + + cmd.Flags().StringVar(&listReq.NotebookName, "notebook-name", listReq.NotebookName, `Notebook name.`) + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `The maximum number of task runs to return.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) + + cmd.Use = "list CLEAN_ROOM_NAME" + cmd.Short = `List notebook task runs.` + cmd.Long = `List notebook task runs. + + List all the historical notebook task runs in a clean room. + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listReq.CleanRoomName = args[0] + + response := w.CleanRoomTaskRuns.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// end service CleanRoomTaskRuns diff --git a/cmd/workspace/clean-rooms/clean-rooms.go b/cmd/workspace/clean-rooms/clean-rooms.go new file mode 100755 index 000000000..053e41e8a --- /dev/null +++ b/cmd/workspace/clean-rooms/clean-rooms.go @@ -0,0 +1,450 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package clean_rooms + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/cleanrooms" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "clean-rooms", + Short: `A clean room uses Delta Sharing and serverless compute to provide a secure and privacy-protecting environment where multiple parties can work together on sensitive enterprise data without direct access to each other’s data.`, + Long: `A clean room uses Delta Sharing and serverless compute to provide a secure and + privacy-protecting environment where multiple parties can work together on + sensitive enterprise data without direct access to each other’s data.`, + GroupID: "cleanrooms", + Annotations: map[string]string{ + "package": "cleanrooms", + }, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newCreateOutputCatalog()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *cleanrooms.CreateCleanRoomRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq cleanrooms.CreateCleanRoomRequest + createReq.CleanRoom = &cleanrooms.CleanRoom{} + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createReq.CleanRoom.Comment, "comment", createReq.CleanRoom.Comment, ``) + cmd.Flags().StringVar(&createReq.CleanRoom.Name, "name", createReq.CleanRoom.Name, `The name of the clean room.`) + // TODO: complex arg: output_catalog + cmd.Flags().StringVar(&createReq.CleanRoom.Owner, "owner", createReq.CleanRoom.Owner, `This is Databricks username of the owner of the local clean room securable for permission management.`) + // TODO: complex arg: remote_detailed_info + + cmd.Use = "create" + cmd.Short = `Create a clean room.` + cmd.Long = `Create a clean room. + + Create a new clean room with the specified collaborators. This method is + asynchronous; the returned name field inside the clean_room field can be used + to poll the clean room status, using the :method:cleanrooms/get method. When + this method returns, the cluster will be in a PROVISIONING state. The cluster + will be usable once it enters an ACTIVE state. + + The caller must be a metastore admin or have the **CREATE_CLEAN_ROOM** + privilege on the metastore.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createJson.Unmarshal(&createReq.CleanRoom) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + + response, err := w.CleanRooms.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. 
+ cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start create-output-catalog command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOutputCatalogOverrides []func( + *cobra.Command, + *cleanrooms.CreateCleanRoomOutputCatalogRequest, +) + +func newCreateOutputCatalog() *cobra.Command { + cmd := &cobra.Command{} + + var createOutputCatalogReq cleanrooms.CreateCleanRoomOutputCatalogRequest + createOutputCatalogReq.OutputCatalog = &cleanrooms.CleanRoomOutputCatalog{} + var createOutputCatalogJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createOutputCatalogJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createOutputCatalogReq.OutputCatalog.CatalogName, "catalog-name", createOutputCatalogReq.OutputCatalog.CatalogName, `The name of the output catalog in UC.`) + + cmd.Use = "create-output-catalog CLEAN_ROOM_NAME" + cmd.Short = `Create an output catalog.` + cmd.Long = `Create an output catalog. + + Create the output catalog of the clean room. + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createOutputCatalogJson.Unmarshal(&createOutputCatalogReq.OutputCatalog) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + createOutputCatalogReq.CleanRoomName = args[0] + + response, err := w.CleanRooms.CreateOutputCatalog(ctx, createOutputCatalogReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOutputCatalogOverrides { + fn(cmd, &createOutputCatalogReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *cleanrooms.DeleteCleanRoomRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq cleanrooms.DeleteCleanRoomRequest + + // TODO: short flags + + cmd.Use = "delete NAME" + cmd.Short = `Delete a clean room.` + cmd.Long = `Delete a clean room. + + Delete a clean room. After deletion, the clean room will be removed from the + metastore. If the other collaborators have not deleted the clean room, they + will still have the clean room in their metastore, but it will be in a DELETED + state and no operations other than deletion can be performed on it. 
+ + Arguments: + NAME: Name of the clean room.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteReq.Name = args[0] + + err = w.CleanRooms.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *cleanrooms.GetCleanRoomRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq cleanrooms.GetCleanRoomRequest + + // TODO: short flags + + cmd.Use = "get NAME" + cmd.Short = `Get a clean room.` + cmd.Long = `Get a clean room. + + Get the details of a clean room given its name.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getReq.Name = args[0] + + response, err := w.CleanRooms.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *cleanrooms.ListCleanRoomsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq cleanrooms.ListCleanRoomsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `Maximum number of clean rooms to return (i.e., the page length).`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) + + cmd.Use = "list" + cmd.Short = `List clean rooms.` + cmd.Long = `List clean rooms. + + Get a list of all clean rooms of the metastore. Only clean rooms the caller + has access to are returned.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.CleanRooms.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. 
+ // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *cleanrooms.UpdateCleanRoomRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq cleanrooms.UpdateCleanRoomRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: clean_room + + cmd.Use = "update NAME" + cmd.Short = `Update a clean room.` + cmd.Long = `Update a clean room. + + Update a clean room. The caller must be the owner of the clean room, have + **MODIFY_CLEAN_ROOM** privilege, or be metastore admin. + + When the caller is a metastore admin, only the __owner__ field can be updated. + + Arguments: + NAME: Name of the clean room.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + updateReq.Name = args[0] + + response, err := w.CleanRooms.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service CleanRooms diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go index db788753b..bbb7c578a 100755 --- a/cmd/workspace/clusters/clusters.go +++ b/cmd/workspace/clusters/clusters.go @@ -204,6 +204,9 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.ClusterName, "cluster-name", createReq.ClusterName, `Cluster name requested by the user.`) // TODO: map via StringToStringVar: custom_tags cmd.Flags().Var(&createReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster. 
Supported values: [ + DATA_SECURITY_MODE_AUTO, + DATA_SECURITY_MODE_DEDICATED, + DATA_SECURITY_MODE_STANDARD, LEGACY_PASSTHROUGH, LEGACY_SINGLE_USER, LEGACY_SINGLE_USER_STANDARD, @@ -220,6 +223,8 @@ func newCreate() *cobra.Command { // TODO: complex arg: gcp_attributes // TODO: array: init_scripts cmd.Flags().StringVar(&createReq.InstancePoolId, "instance-pool-id", createReq.InstancePoolId, `The optional ID of the instance pool to which the cluster belongs.`) + cmd.Flags().BoolVar(&createReq.IsSingleNode, "is-single-node", createReq.IsSingleNode, `This field can only be used with kind.`) + cmd.Flags().Var(&createReq.Kind, "kind", `The kind of compute described by this compute specification. Supported values: [CLASSIC_PREVIEW]`) cmd.Flags().StringVar(&createReq.NodeTypeId, "node-type-id", createReq.NodeTypeId, `This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.`) cmd.Flags().IntVar(&createReq.NumWorkers, "num-workers", createReq.NumWorkers, `Number of worker nodes that this cluster should have.`) cmd.Flags().StringVar(&createReq.PolicyId, "policy-id", createReq.PolicyId, `The ID of the cluster policy used to create the cluster if applicable.`) @@ -228,6 +233,7 @@ func newCreate() *cobra.Command { // TODO: map via StringToStringVar: spark_conf // TODO: map via StringToStringVar: spark_env_vars // TODO: array: ssh_public_keys + cmd.Flags().BoolVar(&createReq.UseMlRuntime, "use-ml-runtime", createReq.UseMlRuntime, `This field can only be used with kind.`) // TODO: complex arg: workload_type cmd.Use = "create SPARK_VERSION" @@ -468,6 +474,9 @@ func newEdit() *cobra.Command { cmd.Flags().StringVar(&editReq.ClusterName, "cluster-name", editReq.ClusterName, `Cluster name requested by the user.`) // TODO: map via StringToStringVar: custom_tags cmd.Flags().Var(&editReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster. Supported values: [ + DATA_SECURITY_MODE_AUTO, + DATA_SECURITY_MODE_DEDICATED, + DATA_SECURITY_MODE_STANDARD, LEGACY_PASSTHROUGH, LEGACY_SINGLE_USER, LEGACY_SINGLE_USER_STANDARD, @@ -484,6 +493,8 @@ func newEdit() *cobra.Command { // TODO: complex arg: gcp_attributes // TODO: array: init_scripts cmd.Flags().StringVar(&editReq.InstancePoolId, "instance-pool-id", editReq.InstancePoolId, `The optional ID of the instance pool to which the cluster belongs.`) + cmd.Flags().BoolVar(&editReq.IsSingleNode, "is-single-node", editReq.IsSingleNode, `This field can only be used with kind.`) + cmd.Flags().Var(&editReq.Kind, "kind", `The kind of compute described by this compute specification. 
Supported values: [CLASSIC_PREVIEW]`) cmd.Flags().StringVar(&editReq.NodeTypeId, "node-type-id", editReq.NodeTypeId, `This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.`) cmd.Flags().IntVar(&editReq.NumWorkers, "num-workers", editReq.NumWorkers, `Number of worker nodes that this cluster should have.`) cmd.Flags().StringVar(&editReq.PolicyId, "policy-id", editReq.PolicyId, `The ID of the cluster policy used to create the cluster if applicable.`) @@ -492,6 +503,7 @@ func newEdit() *cobra.Command { // TODO: map via StringToStringVar: spark_conf // TODO: map via StringToStringVar: spark_env_vars // TODO: array: ssh_public_keys + cmd.Flags().BoolVar(&editReq.UseMlRuntime, "use-ml-runtime", editReq.UseMlRuntime, `This field can only be used with kind.`) // TODO: complex arg: workload_type cmd.Use = "edit CLUSTER_ID SPARK_VERSION" diff --git a/cmd/workspace/cmd.go b/cmd/workspace/cmd.go index 9cb3cca9e..f07d0cf76 100755 --- a/cmd/workspace/cmd.go +++ b/cmd/workspace/cmd.go @@ -8,6 +8,9 @@ import ( apps "github.com/databricks/cli/cmd/workspace/apps" artifact_allowlists "github.com/databricks/cli/cmd/workspace/artifact-allowlists" catalogs "github.com/databricks/cli/cmd/workspace/catalogs" + clean_room_assets "github.com/databricks/cli/cmd/workspace/clean-room-assets" + clean_room_task_runs "github.com/databricks/cli/cmd/workspace/clean-room-task-runs" + clean_rooms "github.com/databricks/cli/cmd/workspace/clean-rooms" cluster_policies "github.com/databricks/cli/cmd/workspace/cluster-policies" clusters "github.com/databricks/cli/cmd/workspace/clusters" connections "github.com/databricks/cli/cmd/workspace/connections" @@ -98,6 +101,9 @@ func All() []*cobra.Command { out = append(out, apps.New()) out = append(out, artifact_allowlists.New()) out = append(out, catalogs.New()) + out = append(out, clean_room_assets.New()) + out = append(out, clean_room_task_runs.New()) + out = append(out, clean_rooms.New()) out = append(out, cluster_policies.New()) out = append(out, clusters.New()) out = append(out, connections.New()) diff --git a/cmd/workspace/credentials/credentials.go b/cmd/workspace/credentials/credentials.go index 44ee0cf31..672a3aeec 100755 --- a/cmd/workspace/credentials/credentials.go +++ b/cmd/workspace/credentials/credentials.go @@ -27,7 +27,7 @@ func New() *cobra.Command { To create credentials, you must be a Databricks account admin or have the CREATE SERVICE CREDENTIAL privilege. The user who creates the credential can - delegate ownership to another user or group to manage permissions on it`, + delegate ownership to another user or group to manage permissions on it.`, GroupID: "catalog", Annotations: map[string]string{ "package": "catalog", @@ -73,7 +73,7 @@ func newCreateCredential() *cobra.Command { // TODO: complex arg: azure_managed_identity // TODO: complex arg: azure_service_principal cmd.Flags().StringVar(&createCredentialReq.Comment, "comment", createCredentialReq.Comment, `Comment associated with the credential.`) - // TODO: complex arg: gcp_service_account_key + // TODO: complex arg: databricks_gcp_service_account cmd.Flags().Var(&createCredentialReq.Purpose, "purpose", `Indicates the purpose of the credential. 
Supported values: [SERVICE, STORAGE]`) cmd.Flags().BoolVar(&createCredentialReq.ReadOnly, "read-only", createCredentialReq.ReadOnly, `Whether the credential is usable only for read operations.`) cmd.Flags().BoolVar(&createCredentialReq.SkipValidation, "skip-validation", createCredentialReq.SkipValidation, `Optional.`) @@ -227,6 +227,7 @@ func newGenerateTemporaryServiceCredential() *cobra.Command { cmd.Flags().Var(&generateTemporaryServiceCredentialJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: complex arg: azure_options + // TODO: complex arg: gcp_options cmd.Use = "generate-temporary-service-credential CREDENTIAL_NAME" cmd.Short = `Generate a temporary service credential.` @@ -434,6 +435,7 @@ func newUpdateCredential() *cobra.Command { // TODO: complex arg: azure_managed_identity // TODO: complex arg: azure_service_principal cmd.Flags().StringVar(&updateCredentialReq.Comment, "comment", updateCredentialReq.Comment, `Comment associated with the credential.`) + // TODO: complex arg: databricks_gcp_service_account cmd.Flags().BoolVar(&updateCredentialReq.Force, "force", updateCredentialReq.Force, `Force an update even if there are dependent services (when purpose is **SERVICE**) or dependent external locations and external tables (when purpose is **STORAGE**).`) cmd.Flags().Var(&updateCredentialReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`) cmd.Flags().StringVar(&updateCredentialReq.NewName, "new-name", updateCredentialReq.NewName, `New name of credential.`) diff --git a/cmd/workspace/groups.go b/cmd/workspace/groups.go index 98e474d33..8827682fa 100644 --- a/cmd/workspace/groups.go +++ b/cmd/workspace/groups.go @@ -72,5 +72,9 @@ func Groups() []cobra.Group { ID: "apps", Title: "Apps", }, + { + ID: "cleanrooms", + Title: "Clean Rooms", + }, } } diff --git a/cmd/workspace/lakeview/lakeview.go b/cmd/workspace/lakeview/lakeview.go index 35c3bdf4e..6686f16da 100755 --- a/cmd/workspace/lakeview/lakeview.go +++ b/cmd/workspace/lakeview/lakeview.go @@ -160,9 +160,6 @@ func newCreateSchedule() *cobra.Command { Arguments: DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -242,9 +239,6 @@ func newCreateSubscription() *cobra.Command { DASHBOARD_ID: UUID identifying the dashboard to which the subscription belongs. SCHEDULE_ID: UUID identifying the schedule to which the subscription belongs.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -322,9 +316,6 @@ func newDeleteSchedule() *cobra.Command { DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs. SCHEDULE_ID: UUID identifying the schedule.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -384,9 +375,6 @@ func newDeleteSubscription() *cobra.Command { SCHEDULE_ID: UUID identifying the schedule which the subscription belongs. SUBSCRIPTION_ID: UUID identifying the subscription.` - // This command is being previewed; hide from help output. 
- cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -562,9 +550,6 @@ func newGetSchedule() *cobra.Command { DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs. SCHEDULE_ID: UUID identifying the schedule.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -624,9 +609,6 @@ func newGetSubscription() *cobra.Command { SCHEDULE_ID: UUID identifying the schedule which the subscription belongs. SUBSCRIPTION_ID: UUID identifying the subscription.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -739,9 +721,6 @@ func newListSchedules() *cobra.Command { Arguments: DASHBOARD_ID: UUID identifying the dashboard to which the schedules belongs.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -798,9 +777,6 @@ func newListSubscriptions() *cobra.Command { DASHBOARD_ID: UUID identifying the dashboard which the subscriptions belongs. SCHEDULE_ID: UUID identifying the schedule which the subscriptions belongs.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -852,6 +828,7 @@ func newMigrate() *cobra.Command { cmd.Flags().StringVar(&migrateReq.DisplayName, "display-name", migrateReq.DisplayName, `Display name for the new Lakeview dashboard.`) cmd.Flags().StringVar(&migrateReq.ParentPath, "parent-path", migrateReq.ParentPath, `The workspace path of the folder to contain the migrated Lakeview dashboard.`) + cmd.Flags().BoolVar(&migrateReq.UpdateParameterSyntax, "update-parameter-syntax", migrateReq.UpdateParameterSyntax, `Flag to indicate if mustache parameter syntax ({{ param }}) should be auto-updated to named syntax (:param) when converting datasets in the dashboard.`) cmd.Use = "migrate SOURCE_DASHBOARD_ID" cmd.Short = `Migrate dashboard.` @@ -1215,9 +1192,6 @@ func newUpdateSchedule() *cobra.Command { DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs. SCHEDULE_ID: UUID identifying the schedule.` - // This command is being previewed; hide from help output. 
- cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/workspace/repos/overrides.go b/cmd/workspace/repos/overrides.go index aad38ecc7..561921623 100644 --- a/cmd/workspace/repos/overrides.go +++ b/cmd/workspace/repos/overrides.go @@ -2,6 +2,7 @@ package repos import ( "context" + "errors" "fmt" "strconv" @@ -153,7 +154,7 @@ func repoArgumentToRepoID(ctx context.Context, w *databricks.WorkspaceClient, ar args = append(args, id) } if len(args) != 1 { - return 0, fmt.Errorf("expected to have the id for the corresponding repo to access") + return 0, errors.New("expected to have the id for the corresponding repo to access") } // ---- End copy from cmd/workspace/repos/repos.go ---- diff --git a/cmd/workspace/secrets/put_secret.go b/cmd/workspace/secrets/put_secret.go index f24814f05..b446524f7 100644 --- a/cmd/workspace/secrets/put_secret.go +++ b/cmd/workspace/secrets/put_secret.go @@ -2,7 +2,7 @@ package secrets import ( "encoding/base64" - "fmt" + "errors" "io" "os" @@ -67,7 +67,7 @@ func newPutSecret() *cobra.Command { bytesValueChanged := cmd.Flags().Changed("bytes-value") stringValueChanged := cmd.Flags().Changed("string-value") if bytesValueChanged && stringValueChanged { - return fmt.Errorf("cannot specify both --bytes-value and --string-value") + return errors.New("cannot specify both --bytes-value and --string-value") } if cmd.Flags().Changed("json") { diff --git a/cmd/workspace/workspace/export_dir.go b/cmd/workspace/workspace/export_dir.go index 0046f46ef..febe4c3e1 100644 --- a/cmd/workspace/workspace/export_dir.go +++ b/cmd/workspace/workspace/export_dir.go @@ -39,7 +39,7 @@ func (opts exportDirOptions) callback(ctx context.Context, workspaceFiler filer. // create directory and return early if d.IsDir() { - return os.MkdirAll(targetPath, 0755) + return os.MkdirAll(targetPath, 0o755) } // Add extension to local file path if the file is a notebook diff --git a/cmd/workspace/workspace/overrides.go b/cmd/workspace/workspace/overrides.go index cfed0a6ee..53438a764 100644 --- a/cmd/workspace/workspace/overrides.go +++ b/cmd/workspace/workspace/overrides.go @@ -36,7 +36,7 @@ func exportOverride(exportCmd *cobra.Command, exportReq *workspace.ExportRequest ctx := cmd.Context() w := root.WorkspaceClient(ctx) if len(args) != 1 { - return fmt.Errorf("expected to have the absolute path of the object or directory") + return errors.New("expected to have the absolute path of the object or directory") } exportReq.Path = args[0] @@ -52,7 +52,7 @@ func exportOverride(exportCmd *cobra.Command, exportReq *workspace.ExportRequest if err != nil { return err } - return os.WriteFile(filePath, b, 0755) + return os.WriteFile(filePath, b, 0o755) } } @@ -88,7 +88,6 @@ func importOverride(importCmd *cobra.Command, importReq *workspace.Import) { err := originalRunE(cmd, args) return wrapImportAPIErrors(err, importReq) } - } func init() { diff --git a/go.mod b/go.mod index 7141ed768..86bc1c368 100644 --- a/go.mod +++ b/go.mod @@ -2,19 +2,19 @@ module github.com/databricks/cli go 1.23 -toolchain go1.23.2 +toolchain go1.23.4 require ( github.com/Masterminds/semver/v3 v3.3.1 // MIT github.com/briandowns/spinner v1.23.1 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.52.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.54.0 // Apache 2.0 github.com/fatih/color v1.18.0 // MIT - github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause github.com/hashicorp/go-version v1.7.0 // 
MPL 2.0 github.com/hashicorp/hc-install v0.9.0 // MPL 2.0 github.com/hashicorp/terraform-exec v0.21.0 // MPL 2.0 github.com/hashicorp/terraform-json v0.23.0 // MPL 2.0 + github.com/hexops/gotextdiff v1.0.3 // BSD 3-Clause "New" or "Revised" License github.com/manifoldco/promptui v0.9.0 // BSD-3-Clause github.com/mattn/go-isatty v0.0.20 // MIT github.com/nwidger/jsoncolor v0.3.2 // MIT @@ -23,12 +23,13 @@ require ( github.com/spf13/cobra v1.8.1 // Apache 2.0 github.com/spf13/pflag v1.0.5 // BSD-3-Clause github.com/stretchr/testify v1.10.0 // MIT + github.com/wI2L/jsondiff v0.6.1 // MIT golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 golang.org/x/mod v0.22.0 golang.org/x/oauth2 v0.24.0 - golang.org/x/sync v0.9.0 - golang.org/x/term v0.26.0 - golang.org/x/text v0.20.0 + golang.org/x/sync v0.10.0 + golang.org/x/term v0.27.0 + golang.org/x/text v0.21.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 gopkg.in/yaml.v3 v3.0.1 ) @@ -56,19 +57,22 @@ require ( github.com/mattn/go-colorable v0.1.13 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/objx v0.5.2 // indirect + github.com/tidwall/gjson v1.18.0 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.1 // indirect + github.com/tidwall/sjson v1.2.5 // indirect github.com/zclconf/go-cty v1.15.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - golang.org/x/crypto v0.24.0 // indirect - golang.org/x/net v0.26.0 // indirect - golang.org/x/sys v0.27.0 // indirect + golang.org/x/crypto v0.31.0 // indirect + golang.org/x/net v0.33.0 // indirect + golang.org/x/sys v0.28.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/api v0.182.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect google.golang.org/grpc v1.64.1 // indirect google.golang.org/protobuf v1.34.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index 5d2c53a37..f6cf79607 100644 --- a/go.sum +++ b/go.sum @@ -32,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.52.0 h1:WKcj0F+pdx0gjI5xMicjYC4O43S2q5nyTpaGGMFmgHw= -github.com/databricks/databricks-sdk-go v0.52.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= +github.com/databricks/databricks-sdk-go v0.54.0 h1:L8gsA3NXs+uYU3QtW/OUgjxMQxOH24k0MT9JhB3zLlM= +github.com/databricks/databricks-sdk-go v0.54.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -48,8 +48,6 @@ github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= 
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= @@ -111,6 +109,8 @@ github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVW github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg= github.com/hashicorp/terraform-json v0.23.0 h1:sniCkExU4iKtTADReHzACkk8fnpQXrdD2xoR+lppBkI= github.com/hashicorp/terraform-json v0.23.0/go.mod h1:MHdXbBAbSg0GvzuWazEGKAn/cyNfIB7mN6y7KJN6y2c= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= @@ -158,6 +158,18 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/wI2L/jsondiff v0.6.1 h1:ISZb9oNWbP64LHnu4AUhsMF5W0FIj5Ok3Krip9Shqpw= +github.com/wI2L/jsondiff v0.6.1/go.mod h1:KAEIojdQq66oJiHhDyQez2x+sRit0vIzC9KeK0yizxM= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/zclconf/go-cty v1.15.0 h1:tTCRWxsexYUmtt/wVxgDClUe+uQusuI443uL6e+5sXQ= @@ -176,8 +188,8 @@ go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.24.0 
h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= @@ -192,16 +204,16 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= -golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -212,14 +224,14 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= -golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= -golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod 
h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= -golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -263,8 +275,6 @@ gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/integration/README.md b/integration/README.md new file mode 100644 index 000000000..1c1d7c6f6 --- /dev/null +++ b/integration/README.md @@ -0,0 +1,37 @@ +# Integration tests + +This directory contains integration tests for the project. + +The tree structure generally mirrors the source code tree structure. + +Requirements for new files in this directory: +* Every package **must** be named after its directory with `_test` appended + * Requiring a different package name for integration tests avoids aliasing with the main package. +* Every integration test package **must** include a `main_test.go` file. + +These requirements are enforced by a unit test in this directory. + +## Running integration tests + +Integration tests require the following environment variables: +* `CLOUD_ENV` - set to the cloud environment to use (e.g. `aws`, `azure`, `gcp`) +* `DATABRICKS_HOST` - set to the Databricks workspace to use +* `DATABRICKS_TOKEN` - set to the Databricks token to use + +Optional environment variables: +* `TEST_DEFAULT_WAREHOUSE_ID` - set to the default warehouse ID to use +* `TEST_METASTORE_ID` - set to the metastore ID to use +* `TEST_INSTANCE_POOL_ID` - set to the instance pool ID to use +* `TEST_BRICKS_CLUSTER_ID` - set to the cluster ID to use + +To run all integration tests, use the following command: + +```bash +go test ./integration/... 
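+
+# Illustrative example only: the required variables can also be set inline, and
+# the standard `go test` flags (-run, -v) scope the run to a single package or
+# test case. The host and token values below are placeholders.
+CLOUD_ENV=aws DATABRICKS_HOST=https://<workspace-url> DATABRICKS_TOKEN=<token> \
+  go test -v -run TestBindJobToExistingJob ./integration/bundle/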
+``` + +Alternatively: + +```bash +make integration +``` diff --git a/internal/dashboard_assumptions_test.go b/integration/assumptions/dashboard_assumptions_test.go similarity index 89% rename from internal/dashboard_assumptions_test.go rename to integration/assumptions/dashboard_assumptions_test.go index 64294873d..3a1dcc907 100644 --- a/internal/dashboard_assumptions_test.go +++ b/integration/assumptions/dashboard_assumptions_test.go @@ -1,10 +1,11 @@ -package internal +package assumptions_test import ( "encoding/base64" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/dyn/convert" "github.com/databricks/cli/libs/dyn/merge" @@ -18,16 +19,16 @@ import ( // Verify that importing a dashboard through the Workspace API retains the identity of the underying resource, // as well as properties exclusively accessible through the dashboards API. -func TestAccDashboardAssumptions_WorkspaceImport(t *testing.T) { +func TestDashboardAssumptions_WorkspaceImport(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) t.Parallel() dashboardName := "New Dashboard" dashboardPayload := []byte(`{"pages":[{"name":"2506f97a","displayName":"New Page"}]}`) - warehouseId := acc.GetEnvOrSkipTest(t, "TEST_DEFAULT_WAREHOUSE_ID") + warehouseId := testutil.GetEnvOrSkipTest(t, "TEST_DEFAULT_WAREHOUSE_ID") - dir := wt.TemporaryWorkspaceDir("dashboard-assumptions-") + dir := acc.TemporaryWorkspaceDir(wt, "dashboard-assumptions-") dashboard, err := wt.W.Lakeview.Create(ctx, dashboards.CreateDashboardRequest{ Dashboard: &dashboards.Dashboard{ @@ -98,7 +99,7 @@ func TestAccDashboardAssumptions_WorkspaceImport(t *testing.T) { assert.Fail(t, "unexpected insert operation") return right, nil }, - VisitUpdate: func(basePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) { + VisitUpdate: func(basePath dyn.Path, left, right dyn.Value) (dyn.Value, error) { updatedFieldPaths = append(updatedFieldPaths, basePath.String()) return right, nil }, diff --git a/internal/bundle/artifacts_test.go b/integration/bundle/artifacts_test.go similarity index 76% rename from internal/bundle/artifacts_test.go rename to integration/bundle/artifacts_test.go index 34d101e4f..94b96899e 100644 --- a/internal/bundle/artifacts_test.go +++ b/integration/bundle/artifacts_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "fmt" @@ -12,8 +12,10 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/bundle/libraries" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/env" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" @@ -23,21 +25,20 @@ import ( ) func touchEmptyFile(t *testing.T, path string) { - err := os.MkdirAll(filepath.Dir(path), 0700) + err := os.MkdirAll(filepath.Dir(path), 0o700) require.NoError(t, err) f, err := os.Create(path) require.NoError(t, err) f.Close() } -func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { +func TestUploadArtifactFileToCorrectRemotePath(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - w := wt.W dir := 
t.TempDir() whlPath := filepath.Join(dir, "dist", "test.whl") touchEmptyFile(t, whlPath) - wsDir := internal.TemporaryWorkspaceDir(t, w) + wsDir := acc.TemporaryWorkspaceDir(wt, "artifact-") b := &bundle.Bundle{ BundleRootPath: dir, @@ -84,25 +85,24 @@ func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { // The remote path attribute on the artifact file should have been set. require.Regexp(t, - regexp.MustCompile(path.Join(regexp.QuoteMeta(wsDir), `.internal/test\.whl`)), + path.Join(regexp.QuoteMeta(wsDir), `.internal/test\.whl`), b.Config.Artifacts["test"].Files[0].RemotePath, ) // The task library path should have been updated to the remote path. require.Regexp(t, - regexp.MustCompile(path.Join("/Workspace", regexp.QuoteMeta(wsDir), `.internal/test\.whl`)), + path.Join("/Workspace", regexp.QuoteMeta(wsDir), `.internal/test\.whl`), b.Config.Resources.Jobs["test"].JobSettings.Tasks[0].Libraries[0].Whl, ) } -func TestAccUploadArtifactFileToCorrectRemotePathWithEnvironments(t *testing.T) { +func TestUploadArtifactFileToCorrectRemotePathWithEnvironments(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - w := wt.W dir := t.TempDir() whlPath := filepath.Join(dir, "dist", "test.whl") touchEmptyFile(t, whlPath) - wsDir := internal.TemporaryWorkspaceDir(t, w) + wsDir := acc.TemporaryWorkspaceDir(wt, "artifact-") b := &bundle.Bundle{ BundleRootPath: dir, @@ -149,26 +149,25 @@ func TestAccUploadArtifactFileToCorrectRemotePathWithEnvironments(t *testing.T) // The remote path attribute on the artifact file should have been set. require.Regexp(t, - regexp.MustCompile(path.Join(regexp.QuoteMeta(wsDir), `.internal/test\.whl`)), + path.Join(regexp.QuoteMeta(wsDir), `.internal/test\.whl`), b.Config.Artifacts["test"].Files[0].RemotePath, ) // The job environment deps path should have been updated to the remote path. require.Regexp(t, - regexp.MustCompile(path.Join("/Workspace", regexp.QuoteMeta(wsDir), `.internal/test\.whl`)), + path.Join("/Workspace", regexp.QuoteMeta(wsDir), `.internal/test\.whl`), b.Config.Resources.Jobs["test"].JobSettings.Environments[0].Spec.Dependencies[0], ) } -func TestAccUploadArtifactFileToCorrectRemotePathForVolumes(t *testing.T) { +func TestUploadArtifactFileToCorrectRemotePathForVolumes(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - w := wt.W if os.Getenv("TEST_METASTORE_ID") == "" { t.Skip("Skipping tests that require a UC Volume when metastore id is not set.") } - volumePath := internal.TemporaryUcVolume(t, w) + volumePath := acc.TemporaryVolume(wt) dir := t.TempDir() whlPath := filepath.Join(dir, "dist", "test.whl") @@ -219,22 +218,22 @@ func TestAccUploadArtifactFileToCorrectRemotePathForVolumes(t *testing.T) { // The remote path attribute on the artifact file should have been set. require.Regexp(t, - regexp.MustCompile(path.Join(regexp.QuoteMeta(volumePath), `.internal/test\.whl`)), + path.Join(regexp.QuoteMeta(volumePath), `.internal/test\.whl`), b.Config.Artifacts["test"].Files[0].RemotePath, ) // The task library path should have been updated to the remote path. 
require.Regexp(t, - regexp.MustCompile(path.Join(regexp.QuoteMeta(volumePath), `.internal/test\.whl`)), + path.Join(regexp.QuoteMeta(volumePath), `.internal/test\.whl`), b.Config.Resources.Jobs["test"].JobSettings.Tasks[0].Libraries[0].Whl, ) } -func TestAccUploadArtifactFileToVolumeThatDoesNotExist(t *testing.T) { +func TestUploadArtifactFileToVolumeThatDoesNotExist(t *testing.T) { ctx, wt := acc.UcWorkspaceTest(t) w := wt.W - schemaName := internal.RandomName("schema-") + schemaName := testutil.RandomName("schema-") _, err := w.Schemas.Create(ctx, catalog.CreateSchema{ CatalogName: "main", @@ -248,18 +247,17 @@ func TestAccUploadArtifactFileToVolumeThatDoesNotExist(t *testing.T) { require.NoError(t, err) }) - bundleRoot, err := initTestTemplate(t, ctx, "artifact_path_with_volume", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "artifact_path_with_volume", map[string]any{ "unique_id": uuid.New().String(), "schema_name": schemaName, "volume_name": "doesnotexist", }) - require.NoError(t, err) - t.Setenv("BUNDLE_ROOT", bundleRoot) - stdout, stderr, err := internal.RequireErrorRun(t, "bundle", "deploy") + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + stdout, stderr, err := testcli.RequireErrorRun(t, ctx, "bundle", "deploy") assert.Error(t, err) - assert.Equal(t, fmt.Sprintf(`Error: volume /Volumes/main/%s/doesnotexist does not exist: Not Found + assert.Equal(t, fmt.Sprintf(`Error: volume main.%s.doesnotexist does not exist at workspace.artifact_path in databricks.yml:6:18 @@ -267,11 +265,11 @@ func TestAccUploadArtifactFileToVolumeThatDoesNotExist(t *testing.T) { assert.Equal(t, "", stderr.String()) } -func TestAccUploadArtifactToVolumeNotYetDeployed(t *testing.T) { +func TestUploadArtifactToVolumeNotYetDeployed(t *testing.T) { ctx, wt := acc.UcWorkspaceTest(t) w := wt.W - schemaName := internal.RandomName("schema-") + schemaName := testutil.RandomName("schema-") _, err := w.Schemas.Create(ctx, catalog.CreateSchema{ CatalogName: "main", @@ -285,18 +283,17 @@ func TestAccUploadArtifactToVolumeNotYetDeployed(t *testing.T) { require.NoError(t, err) }) - bundleRoot, err := initTestTemplate(t, ctx, "artifact_path_with_volume", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "artifact_path_with_volume", map[string]any{ "unique_id": uuid.New().String(), "schema_name": schemaName, "volume_name": "my_volume", }) - require.NoError(t, err) - t.Setenv("BUNDLE_ROOT", bundleRoot) - stdout, stderr, err := internal.RequireErrorRun(t, "bundle", "deploy") + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + stdout, stderr, err := testcli.RequireErrorRun(t, ctx, "bundle", "deploy") assert.Error(t, err) - assert.Equal(t, fmt.Sprintf(`Error: volume /Volumes/main/%s/my_volume does not exist: Not Found + assert.Equal(t, fmt.Sprintf(`Error: volume main.%s.my_volume does not exist at workspace.artifact_path resources.volumes.foo in databricks.yml:6:18 diff --git a/integration/bundle/basic_test.go b/integration/bundle/basic_test.go new file mode 100644 index 000000000..79301b850 --- /dev/null +++ b/integration/bundle/basic_test.go @@ -0,0 +1,37 @@ +package bundle_test + +import ( + "os" + "path/filepath" + "testing" + + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" + "github.com/google/uuid" + "github.com/stretchr/testify/require" +) + +func TestBasicBundleDeployWithFailOnActiveRuns(t *testing.T) { + ctx, _ := acc.WorkspaceTest(t) + + nodeTypeId := testutil.GetCloud(t).NodeTypeID() + uniqueId := uuid.New().String() + root := initTestTemplate(t, 
ctx, "basic", map[string]any{ + "unique_id": uniqueId, + "node_type_id": nodeTypeId, + "spark_version": defaultSparkVersion, + }) + + t.Cleanup(func() { + destroyBundle(t, ctx, root) + }) + + // deploy empty bundle + deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"}) + + // Remove .databricks directory to simulate a fresh deployment + require.NoError(t, os.RemoveAll(filepath.Join(root, ".databricks"))) + + // deploy empty bundle again + deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"}) +} diff --git a/internal/bundle/bind_resource_test.go b/integration/bundle/bind_resource_test.go similarity index 60% rename from internal/bundle/bind_resource_test.go rename to integration/bundle/bind_resource_test.go index 8cc5da536..ba10190aa 100644 --- a/internal/bundle/bind_resource_test.go +++ b/integration/bundle/bind_resource_test.go @@ -1,13 +1,15 @@ -package bundle +package bundle_test import ( - "fmt" "os" "path/filepath" + "strconv" "testing" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/env" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/google/uuid" @@ -15,39 +17,33 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccBindJobToExistingJob(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) - +func TestBindJobToExistingJob(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - gt := &generateJobTest{T: t, w: wt.W} + gt := &generateJobTest{T: wt, w: wt.W} - nodeTypeId := internal.GetNodeTypeId(env) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, "spark_version": "13.3.x-scala2.12", "node_type_id": nodeTypeId, }) - require.NoError(t, err) jobId := gt.createTestJob(ctx) t.Cleanup(func() { gt.destroyJob(ctx, jobId) - require.NoError(t, err) }) - t.Setenv("BUNDLE_ROOT", bundleRoot) - c := internal.NewCobraTestRunner(t, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId), "--auto-approve") - _, _, err = c.Run() + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "foo", strconv.FormatInt(jobId, 10), "--auto-approve") + _, _, err := c.Run() require.NoError(t, err) // Remove .databricks directory to simulate a fresh deployment err = os.RemoveAll(filepath.Join(bundleRoot, ".databricks")) require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) w, err := databricks.NewWorkspaceClient() require.NoError(t, err) @@ -57,10 +53,10 @@ func TestAccBindJobToExistingJob(t *testing.T) { JobId: jobId, }) require.NoError(t, err) - require.Equal(t, job.Settings.Name, fmt.Sprintf("test-job-basic-%s", uniqueId)) + require.Equal(t, job.Settings.Name, "test-job-basic-"+uniqueId) require.Contains(t, job.Settings.Tasks[0].SparkPythonTask.PythonFile, "hello_world.py") - c = internal.NewCobraTestRunner(t, "bundle", "deployment", "unbind", "foo") + c = testcli.NewRunner(t, ctx, "bundle", "deployment", "unbind", "foo") _, _, err = c.Run() require.NoError(t, err) @@ -68,33 +64,28 @@ func TestAccBindJobToExistingJob(t *testing.T) { err = 
os.RemoveAll(filepath.Join(bundleRoot, ".databricks")) require.NoError(t, err) - err = destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) // Check that job is unbound and exists after bundle is destroyed job, err = w.Jobs.Get(ctx, jobs.GetJobRequest{ JobId: jobId, }) require.NoError(t, err) - require.Equal(t, job.Settings.Name, fmt.Sprintf("test-job-basic-%s", uniqueId)) + require.Equal(t, job.Settings.Name, "test-job-basic-"+uniqueId) require.Contains(t, job.Settings.Tasks[0].SparkPythonTask.PythonFile, "hello_world.py") } -func TestAccAbortBind(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) - +func TestAbortBind(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - gt := &generateJobTest{T: t, w: wt.W} + gt := &generateJobTest{T: wt, w: wt.W} - nodeTypeId := internal.GetNodeTypeId(env) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, "spark_version": "13.3.x-scala2.12", "node_type_id": nodeTypeId, }) - require.NoError(t, err) jobId := gt.createTestJob(ctx) t.Cleanup(func() { @@ -103,17 +94,16 @@ func TestAccAbortBind(t *testing.T) { }) // Bind should fail because prompting is not possible. - t.Setenv("BUNDLE_ROOT", bundleRoot) - t.Setenv("TERM", "dumb") - c := internal.NewCobraTestRunner(t, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId)) + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + ctx = env.Set(ctx, "TERM", "dumb") + c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "foo", strconv.FormatInt(jobId, 10)) // Expect error suggesting to use --auto-approve - _, _, err = c.Run() + _, _, err := c.Run() assert.ErrorContains(t, err, "failed to bind the resource") assert.ErrorContains(t, err, "This bind operation requires user confirmation, but the current console does not support prompting. 
Please specify --auto-approve if you would like to skip prompts and proceed") - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) w, err := databricks.NewWorkspaceClient() require.NoError(t, err) @@ -124,22 +114,18 @@ func TestAccAbortBind(t *testing.T) { }) require.NoError(t, err) - require.NotEqual(t, job.Settings.Name, fmt.Sprintf("test-job-basic-%s", uniqueId)) + require.NotEqual(t, job.Settings.Name, "test-job-basic-"+uniqueId) require.Contains(t, job.Settings.Tasks[0].NotebookTask.NotebookPath, "test") } -func TestAccGenerateAndBind(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) - +func TestGenerateAndBind(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - gt := &generateJobTest{T: t, w: wt.W} + gt := &generateJobTest{T: wt, w: wt.W} uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "with_includes", map[string]any{ "unique_id": uniqueId, }) - require.NoError(t, err) w, err := databricks.NewWorkspaceClient() require.NoError(t, err) @@ -154,10 +140,10 @@ func TestAccGenerateAndBind(t *testing.T) { } }) - t.Setenv("BUNDLE_ROOT", bundleRoot) - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "generate", "job", + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + c := testcli.NewRunner(t, ctx, "bundle", "generate", "job", "--key", "test_job_key", - "--existing-job-id", fmt.Sprint(jobId), + "--existing-job-id", strconv.FormatInt(jobId, 10), "--config-dir", filepath.Join(bundleRoot, "resources"), "--source-dir", filepath.Join(bundleRoot, "src")) _, _, err = c.Run() @@ -171,15 +157,13 @@ func TestAccGenerateAndBind(t *testing.T) { require.Len(t, matches, 1) - c = internal.NewCobraTestRunner(t, "bundle", "deployment", "bind", "test_job_key", fmt.Sprint(jobId), "--auto-approve") + c = testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "test_job_key", strconv.FormatInt(jobId, 10), "--auto-approve") _, _, err = c.Run() require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) - err = destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) // Check that job is bound and does not exist after bundle is destroyed _, err = w.Jobs.Get(ctx, jobs.GetJobRequest{ diff --git a/internal/bundle/bundles/artifact_path_with_volume/databricks_template_schema.json b/integration/bundle/bundles/artifact_path_with_volume/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/artifact_path_with_volume/databricks_template_schema.json rename to integration/bundle/bundles/artifact_path_with_volume/databricks_template_schema.json diff --git a/internal/bundle/bundles/basic/databricks_template_schema.json b/integration/bundle/bundles/basic/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/basic/databricks_template_schema.json rename to integration/bundle/bundles/basic/databricks_template_schema.json diff --git a/internal/bundle/bundles/basic/template/databricks.yml.tmpl
b/integration/bundle/bundles/basic/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/basic/template/databricks.yml.tmpl rename to integration/bundle/bundles/basic/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/basic/template/hello_world.py b/integration/bundle/bundles/basic/template/hello_world.py similarity index 100% rename from internal/bundle/bundles/basic/template/hello_world.py rename to integration/bundle/bundles/basic/template/hello_world.py diff --git a/internal/bundle/bundles/clusters/databricks_template_schema.json b/integration/bundle/bundles/clusters/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/clusters/databricks_template_schema.json rename to integration/bundle/bundles/clusters/databricks_template_schema.json diff --git a/internal/bundle/bundles/clusters/template/databricks.yml.tmpl b/integration/bundle/bundles/clusters/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/clusters/template/databricks.yml.tmpl rename to integration/bundle/bundles/clusters/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/clusters/template/hello_world.py b/integration/bundle/bundles/clusters/template/hello_world.py similarity index 100% rename from internal/bundle/bundles/clusters/template/hello_world.py rename to integration/bundle/bundles/clusters/template/hello_world.py diff --git a/internal/bundle/bundles/dashboards/databricks_template_schema.json b/integration/bundle/bundles/dashboards/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/dashboards/databricks_template_schema.json rename to integration/bundle/bundles/dashboards/databricks_template_schema.json diff --git a/internal/bundle/bundles/dashboards/template/dashboard.lvdash.json b/integration/bundle/bundles/dashboards/template/dashboard.lvdash.json similarity index 100% rename from internal/bundle/bundles/dashboards/template/dashboard.lvdash.json rename to integration/bundle/bundles/dashboards/template/dashboard.lvdash.json diff --git a/internal/bundle/bundles/dashboards/template/databricks.yml.tmpl b/integration/bundle/bundles/dashboards/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/dashboards/template/databricks.yml.tmpl rename to integration/bundle/bundles/dashboards/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json b/integration/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json rename to integration/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json diff --git a/internal/bundle/bundles/deploy_then_remove_resources/template/bar.py b/integration/bundle/bundles/deploy_then_remove_resources/template/bar.py similarity index 100% rename from internal/bundle/bundles/deploy_then_remove_resources/template/bar.py rename to integration/bundle/bundles/deploy_then_remove_resources/template/bar.py diff --git a/internal/bundle/bundles/deploy_then_remove_resources/template/databricks.yml.tmpl b/integration/bundle/bundles/deploy_then_remove_resources/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/deploy_then_remove_resources/template/databricks.yml.tmpl rename to 
integration/bundle/bundles/deploy_then_remove_resources/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/deploy_then_remove_resources/template/foo.py b/integration/bundle/bundles/deploy_then_remove_resources/template/foo.py similarity index 100% rename from internal/bundle/bundles/deploy_then_remove_resources/template/foo.py rename to integration/bundle/bundles/deploy_then_remove_resources/template/foo.py diff --git a/internal/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl b/integration/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl similarity index 100% rename from internal/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl rename to integration/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl diff --git a/internal/bundle/bundles/job_metadata/databricks_template_schema.json b/integration/bundle/bundles/job_metadata/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/job_metadata/databricks_template_schema.json rename to integration/bundle/bundles/job_metadata/databricks_template_schema.json diff --git a/internal/bundle/bundles/job_metadata/template/a/b/bar.py b/integration/bundle/bundles/job_metadata/template/a/b/bar.py similarity index 100% rename from internal/bundle/bundles/job_metadata/template/a/b/bar.py rename to integration/bundle/bundles/job_metadata/template/a/b/bar.py diff --git a/internal/bundle/bundles/job_metadata/template/a/b/resources.yml.tmpl b/integration/bundle/bundles/job_metadata/template/a/b/resources.yml.tmpl similarity index 100% rename from internal/bundle/bundles/job_metadata/template/a/b/resources.yml.tmpl rename to integration/bundle/bundles/job_metadata/template/a/b/resources.yml.tmpl diff --git a/internal/bundle/bundles/job_metadata/template/databricks.yml.tmpl b/integration/bundle/bundles/job_metadata/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/job_metadata/template/databricks.yml.tmpl rename to integration/bundle/bundles/job_metadata/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/job_metadata/template/foo.py b/integration/bundle/bundles/job_metadata/template/foo.py similarity index 100% rename from internal/bundle/bundles/job_metadata/template/foo.py rename to integration/bundle/bundles/job_metadata/template/foo.py diff --git a/internal/bundle/bundles/python_wheel_task/databricks_template_schema.json b/integration/bundle/bundles/python_wheel_task/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/python_wheel_task/databricks_template_schema.json rename to integration/bundle/bundles/python_wheel_task/databricks_template_schema.json diff --git a/internal/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl b/integration/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl rename to integration/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/python_wheel_task/template/setup.py.tmpl b/integration/bundle/bundles/python_wheel_task/template/setup.py.tmpl similarity index 100% rename from internal/bundle/bundles/python_wheel_task/template/setup.py.tmpl rename to integration/bundle/bundles/python_wheel_task/template/setup.py.tmpl diff --git a/internal/bundle/bundles/python_wheel_task/template/{{.project_name}}/__init__.py 
b/integration/bundle/bundles/python_wheel_task/template/{{.project_name}}/__init__.py similarity index 100% rename from internal/bundle/bundles/python_wheel_task/template/{{.project_name}}/__init__.py rename to integration/bundle/bundles/python_wheel_task/template/{{.project_name}}/__init__.py diff --git a/internal/bundle/bundles/python_wheel_task/template/{{.project_name}}/__main__.py b/integration/bundle/bundles/python_wheel_task/template/{{.project_name}}/__main__.py similarity index 100% rename from internal/bundle/bundles/python_wheel_task/template/{{.project_name}}/__main__.py rename to integration/bundle/bundles/python_wheel_task/template/{{.project_name}}/__main__.py diff --git a/internal/bundle/bundles/python_wheel_task_with_cluster/databricks_template_schema.json b/integration/bundle/bundles/python_wheel_task_with_cluster/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_cluster/databricks_template_schema.json rename to integration/bundle/bundles/python_wheel_task_with_cluster/databricks_template_schema.json diff --git a/internal/bundle/bundles/python_wheel_task_with_cluster/template/databricks.yml.tmpl b/integration/bundle/bundles/python_wheel_task_with_cluster/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_cluster/template/databricks.yml.tmpl rename to integration/bundle/bundles/python_wheel_task_with_cluster/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/python_wheel_task_with_cluster/template/setup.py.tmpl b/integration/bundle/bundles/python_wheel_task_with_cluster/template/setup.py.tmpl similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_cluster/template/setup.py.tmpl rename to integration/bundle/bundles/python_wheel_task_with_cluster/template/setup.py.tmpl diff --git a/internal/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__init__.py b/integration/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__init__.py similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__init__.py rename to integration/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__init__.py diff --git a/internal/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__main__.py b/integration/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__main__.py similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__main__.py rename to integration/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__main__.py diff --git a/internal/bundle/bundles/python_wheel_task_with_environments/databricks_template_schema.json b/integration/bundle/bundles/python_wheel_task_with_environments/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_environments/databricks_template_schema.json rename to integration/bundle/bundles/python_wheel_task_with_environments/databricks_template_schema.json diff --git a/internal/bundle/bundles/python_wheel_task_with_environments/template/databricks.yml.tmpl b/integration/bundle/bundles/python_wheel_task_with_environments/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_environments/template/databricks.yml.tmpl rename to 
integration/bundle/bundles/python_wheel_task_with_environments/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/python_wheel_task_with_environments/template/setup.py.tmpl b/integration/bundle/bundles/python_wheel_task_with_environments/template/setup.py.tmpl similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_environments/template/setup.py.tmpl rename to integration/bundle/bundles/python_wheel_task_with_environments/template/setup.py.tmpl diff --git a/internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__init__.py b/integration/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__init__.py similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__init__.py rename to integration/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__init__.py diff --git a/internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__main__.py b/integration/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__main__.py similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__main__.py rename to integration/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__main__.py diff --git a/internal/bundle/bundles/recreate_pipeline/databricks_template_schema.json b/integration/bundle/bundles/recreate_pipeline/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/recreate_pipeline/databricks_template_schema.json rename to integration/bundle/bundles/recreate_pipeline/databricks_template_schema.json diff --git a/internal/bundle/bundles/recreate_pipeline/template/databricks.yml.tmpl b/integration/bundle/bundles/recreate_pipeline/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/recreate_pipeline/template/databricks.yml.tmpl rename to integration/bundle/bundles/recreate_pipeline/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/recreate_pipeline/template/nb.sql b/integration/bundle/bundles/recreate_pipeline/template/nb.sql similarity index 100% rename from internal/bundle/bundles/recreate_pipeline/template/nb.sql rename to integration/bundle/bundles/recreate_pipeline/template/nb.sql diff --git a/internal/bundle/bundles/spark_jar_task/databricks_template_schema.json b/integration/bundle/bundles/spark_jar_task/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/spark_jar_task/databricks_template_schema.json rename to integration/bundle/bundles/spark_jar_task/databricks_template_schema.json diff --git a/internal/bundle/bundles/spark_jar_task/template/databricks.yml.tmpl b/integration/bundle/bundles/spark_jar_task/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/spark_jar_task/template/databricks.yml.tmpl rename to integration/bundle/bundles/spark_jar_task/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/META-INF/MANIFEST.MF b/integration/bundle/bundles/spark_jar_task/template/{{.project_name}}/META-INF/MANIFEST.MF similarity index 100% rename from internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/META-INF/MANIFEST.MF rename to integration/bundle/bundles/spark_jar_task/template/{{.project_name}}/META-INF/MANIFEST.MF diff --git 
a/internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/PrintArgs.java b/integration/bundle/bundles/spark_jar_task/template/{{.project_name}}/PrintArgs.java similarity index 100% rename from internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/PrintArgs.java rename to integration/bundle/bundles/spark_jar_task/template/{{.project_name}}/PrintArgs.java diff --git a/internal/bundle/bundles/uc_schema/databricks_template_schema.json b/integration/bundle/bundles/uc_schema/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/uc_schema/databricks_template_schema.json rename to integration/bundle/bundles/uc_schema/databricks_template_schema.json diff --git a/internal/bundle/bundles/uc_schema/template/databricks.yml.tmpl b/integration/bundle/bundles/uc_schema/template/databricks.yml.tmpl similarity index 93% rename from internal/bundle/bundles/uc_schema/template/databricks.yml.tmpl rename to integration/bundle/bundles/uc_schema/template/databricks.yml.tmpl index 15076ac85..0cb8d4f61 100644 --- a/internal/bundle/bundles/uc_schema/template/databricks.yml.tmpl +++ b/integration/bundle/bundles/uc_schema/template/databricks.yml.tmpl @@ -12,7 +12,6 @@ resources: - notebook: path: ./nb.sql development: true - catalog: main include: - "*.yml" diff --git a/internal/bundle/bundles/uc_schema/template/nb.sql b/integration/bundle/bundles/uc_schema/template/nb.sql similarity index 100% rename from internal/bundle/bundles/uc_schema/template/nb.sql rename to integration/bundle/bundles/uc_schema/template/nb.sql diff --git a/internal/bundle/bundles/uc_schema/template/schema.yml.tmpl b/integration/bundle/bundles/uc_schema/template/schema.yml.tmpl similarity index 91% rename from internal/bundle/bundles/uc_schema/template/schema.yml.tmpl rename to integration/bundle/bundles/uc_schema/template/schema.yml.tmpl index 50067036e..0fcf10453 100644 --- a/internal/bundle/bundles/uc_schema/template/schema.yml.tmpl +++ b/integration/bundle/bundles/uc_schema/template/schema.yml.tmpl @@ -11,3 +11,4 @@ targets: pipelines: foo: target: ${resources.schemas.bar.id} + catalog: main diff --git a/internal/bundle/bundles/volume/databricks_template_schema.json b/integration/bundle/bundles/volume/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/volume/databricks_template_schema.json rename to integration/bundle/bundles/volume/databricks_template_schema.json diff --git a/internal/bundle/bundles/volume/template/databricks.yml.tmpl b/integration/bundle/bundles/volume/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/volume/template/databricks.yml.tmpl rename to integration/bundle/bundles/volume/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/volume/template/nb.sql b/integration/bundle/bundles/volume/template/nb.sql similarity index 100% rename from internal/bundle/bundles/volume/template/nb.sql rename to integration/bundle/bundles/volume/template/nb.sql diff --git a/internal/bundle/bundles/with_includes/databricks_template_schema.json b/integration/bundle/bundles/with_includes/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/with_includes/databricks_template_schema.json rename to integration/bundle/bundles/with_includes/databricks_template_schema.json diff --git a/internal/bundle/bundles/with_includes/template/databricks.yml.tmpl b/integration/bundle/bundles/with_includes/template/databricks.yml.tmpl similarity index 100% rename from 
internal/bundle/bundles/with_includes/template/databricks.yml.tmpl rename to integration/bundle/bundles/with_includes/template/databricks.yml.tmpl diff --git a/internal/bundle/clusters_test.go b/integration/bundle/clusters_test.go similarity index 56% rename from internal/bundle/clusters_test.go rename to integration/bundle/clusters_test.go index a961f3ea8..b94b8365e 100644 --- a/internal/bundle/clusters_test.go +++ b/integration/bundle/clusters_test.go @@ -1,55 +1,53 @@ -package bundle +package bundle_test import ( - "fmt" "testing" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" - "github.com/databricks/cli/libs/env" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/google/uuid" "github.com/stretchr/testify/require" ) -func TestAccDeployBundleWithCluster(t *testing.T) { - ctx, wt := acc.WorkspaceTest(t) - - if testutil.IsAWSCloud(wt.T) { +func TestDeployBundleWithCluster(t *testing.T) { + if testutil.GetCloud(t) == testutil.AWS { t.Skip("Skipping test for AWS cloud because it is not permitted to create clusters") } - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + ctx, wt := acc.WorkspaceTest(t) + + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - root, err := initTestTemplate(t, ctx, "clusters", map[string]any{ + root := initTestTemplate(t, ctx, "clusters", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, }) - require.NoError(t, err) t.Cleanup(func() { - err = destroyBundle(t, ctx, root) - require.NoError(t, err) + destroyBundle(t, ctx, root) - cluster, err := wt.W.Clusters.GetByClusterName(ctx, fmt.Sprintf("test-cluster-%s", uniqueId)) + cluster, err := wt.W.Clusters.GetByClusterName(ctx, "test-cluster-"+uniqueId) if err != nil { require.ErrorContains(t, err, "does not exist") } else { require.Contains(t, []compute.State{compute.StateTerminated, compute.StateTerminating}, cluster.State) } - }) - err = deployBundle(t, ctx, root) - require.NoError(t, err) + deployBundle(t, ctx, root) // Cluster should exist after bundle deployment - cluster, err := wt.W.Clusters.GetByClusterName(ctx, fmt.Sprintf("test-cluster-%s", uniqueId)) + cluster, err := wt.W.Clusters.GetByClusterName(ctx, "test-cluster-"+uniqueId) require.NoError(t, err) require.NotNil(t, cluster) + if testing.Short() { + t.Log("Skip the job run in short mode") + return + } + out, err := runResource(t, ctx, root, "foo") require.NoError(t, err) require.Contains(t, out, "Hello World!") diff --git a/internal/bundle/dashboards_test.go b/integration/bundle/dashboards_test.go similarity index 73% rename from internal/bundle/dashboards_test.go rename to integration/bundle/dashboards_test.go index 3c2e27c62..a96b657f6 100644 --- a/internal/bundle/dashboards_test.go +++ b/integration/bundle/dashboards_test.go @@ -1,10 +1,11 @@ -package bundle +package bundle_test import ( "fmt" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/service/dashboards" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/google/uuid" @@ -12,24 +13,21 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccDashboards(t *testing.T) { +func TestDashboards(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - warehouseID := 
acc.GetEnvOrSkipTest(t, "TEST_DEFAULT_WAREHOUSE_ID") + warehouseID := testutil.GetEnvOrSkipTest(t, "TEST_DEFAULT_WAREHOUSE_ID") uniqueID := uuid.New().String() - root, err := initTestTemplate(t, ctx, "dashboards", map[string]any{ + root := initTestTemplate(t, ctx, "dashboards", map[string]any{ "unique_id": uniqueID, "warehouse_id": warehouseID, }) - require.NoError(t, err) t.Cleanup(func() { - err = destroyBundle(t, ctx, root) - require.NoError(t, err) + destroyBundle(t, ctx, root) }) - err = deployBundle(t, ctx, root) - require.NoError(t, err) + deployBundle(t, ctx, root) // Load bundle configuration by running the validate command. b := unmarshalConfig(t, mustValidateBundle(t, ctx, root)) @@ -42,7 +40,7 @@ func TestAccDashboards(t *testing.T) { // Load the dashboard by its ID and confirm its display name. dashboard, err := wt.W.Lakeview.GetByDashboardId(ctx, oi.ResourceId) require.NoError(t, err) - assert.Equal(t, fmt.Sprintf("test-dashboard-%s", uniqueID), dashboard.DisplayName) + assert.Equal(t, "test-dashboard-"+uniqueID, dashboard.DisplayName) // Make an out of band modification to the dashboard and confirm that it is detected. _, err = wt.W.Lakeview.Update(ctx, dashboards.UpdateDashboardRequest{ @@ -54,12 +52,11 @@ func TestAccDashboards(t *testing.T) { require.NoError(t, err) // Try to redeploy the bundle and confirm that the out of band modification is detected. - stdout, _, err := deployBundleWithArgs(t, ctx, root) + stdout, _, err := deployBundleWithArgsErr(t, ctx, root) require.Error(t, err) assert.Contains(t, stdout, `Error: dashboard "file_reference" has been modified remotely`+"\n") // Redeploy the bundle with the --force flag and confirm that the out of band modification is ignored. - _, stderr, err := deployBundleWithArgs(t, ctx, root, "--force") - require.NoError(t, err) + _, stderr := deployBundleWithArgs(t, ctx, root, "--force") assert.Contains(t, stderr, `Deployment complete!`+"\n") } diff --git a/internal/bundle/deploy_test.go b/integration/bundle/deploy_test.go similarity index 79% rename from internal/bundle/deploy_test.go rename to integration/bundle/deploy_test.go index 759e85de5..309b82917 100644 --- a/internal/bundle/deploy_test.go +++ b/integration/bundle/deploy_test.go @@ -1,8 +1,7 @@ -package bundle +package bundle_test import ( "context" - "errors" "fmt" "io" "os" @@ -11,8 +10,9 @@ import ( "testing" "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/env" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" @@ -24,13 +24,11 @@ import ( ) func setupUcSchemaBundle(t *testing.T, ctx context.Context, w *databricks.WorkspaceClient, uniqueId string) string { - bundleRoot, err := initTestTemplate(t, ctx, "uc_schema", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "uc_schema", map[string]any{ "unique_id": uniqueId, }) - require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) t.Cleanup(func() { destroyBundle(t, ctx, bundleRoot) @@ -80,7 +78,7 @@ func setupUcSchemaBundle(t *testing.T, ctx context.Context, w *databricks.Worksp return bundleRoot } -func TestAccBundleDeployUcSchema(t *testing.T) { +func TestBundleDeployUcSchema(t *testing.T) { ctx, wt := acc.UcWorkspaceTest(t) w := wt.W @@ -95,17 +93,16 @@ 
func TestAccBundleDeployUcSchema(t *testing.T) { require.NoError(t, err) // Redeploy the bundle - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) // Assert the schema is deleted _, err = w.Schemas.GetByFullName(ctx, strings.Join([]string{catalogName, schemaName}, ".")) apiErr := &apierr.APIError{} - assert.True(t, errors.As(err, &apiErr)) + assert.ErrorAs(t, err, &apiErr) assert.Equal(t, "SCHEMA_DOES_NOT_EXIST", apiErr.ErrorCode) } -func TestAccBundleDeployUcSchemaFailsWithoutAutoApprove(t *testing.T) { +func TestBundleDeployUcSchemaFailsWithoutAutoApprove(t *testing.T) { ctx, wt := acc.UcWorkspaceTest(t) w := wt.W @@ -117,9 +114,9 @@ func TestAccBundleDeployUcSchemaFailsWithoutAutoApprove(t *testing.T) { require.NoError(t, err) // Redeploy the bundle - t.Setenv("BUNDLE_ROOT", bundleRoot) - t.Setenv("TERM", "dumb") - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock") + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + ctx = env.Set(ctx, "TERM", "dumb") + c := testcli.NewRunner(t, ctx, "bundle", "deploy", "--force-lock") stdout, stderr, err := c.Run() assert.EqualError(t, err, root.ErrAlreadyPrinted.Error()) @@ -127,22 +124,20 @@ func TestAccBundleDeployUcSchemaFailsWithoutAutoApprove(t *testing.T) { assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed") } -func TestAccBundlePipelineDeleteWithoutAutoApprove(t *testing.T) { +func TestBundlePipelineDeleteWithoutAutoApprove(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, }) - require.NoError(t, err) // deploy pipeline - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) // assert pipeline is created pipelineName := "test-bundle-pipeline-" + uniqueId @@ -161,9 +156,9 @@ func TestAccBundlePipelineDeleteWithoutAutoApprove(t *testing.T) { require.NoError(t, err) // Redeploy the bundle. Expect it to fail because deleting the pipeline requires --auto-approve. - t.Setenv("BUNDLE_ROOT", bundleRoot) - t.Setenv("TERM", "dumb") - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock") + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + ctx = env.Set(ctx, "TERM", "dumb") + c := testcli.NewRunner(t, ctx, "bundle", "deploy", "--force-lock") stdout, stderr, err := c.Run() assert.EqualError(t, err, root.ErrAlreadyPrinted.Error()) @@ -173,21 +168,18 @@ restore the defined STs and MVs through full refresh. Note that recreation is ne properties such as the 'catalog' or 'storage' are changed: delete pipeline bar`) assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. 
Please specify --auto-approve if you would like to skip prompts and proceed") - } -func TestAccBundlePipelineRecreateWithoutAutoApprove(t *testing.T) { +func TestBundlePipelineRecreateWithoutAutoApprove(t *testing.T) { ctx, wt := acc.UcWorkspaceTest(t) w := wt.W uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "recreate_pipeline", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "recreate_pipeline", map[string]any{ "unique_id": uniqueId, }) - require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) t.Cleanup(func() { destroyBundle(t, ctx, bundleRoot) @@ -200,9 +192,9 @@ func TestAccBundlePipelineRecreateWithoutAutoApprove(t *testing.T) { require.Equal(t, pipelineName, pipeline.Name) // Redeploy the bundle, pointing the DLT pipeline to a different UC catalog. - t.Setenv("BUNDLE_ROOT", bundleRoot) - t.Setenv("TERM", "dumb") - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock", "--var=\"catalog=whatever\"") + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + ctx = env.Set(ctx, "TERM", "dumb") + c := testcli.NewRunner(t, ctx, "bundle", "deploy", "--force-lock", "--var=\"catalog=whatever\"") stdout, stderr, err := c.Run() assert.EqualError(t, err, root.ErrAlreadyPrinted.Error()) @@ -214,27 +206,25 @@ properties such as the 'catalog' or 'storage' are changed: assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed") } -func TestAccDeployBasicBundleLogs(t *testing.T) { +func TestDeployBasicBundleLogs(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - root, err := initTestTemplate(t, ctx, "basic", map[string]any{ + root := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, }) - require.NoError(t, err) t.Cleanup(func() { - err = destroyBundle(t, ctx, root) - require.NoError(t, err) + destroyBundle(t, ctx, root) }) currentUser, err := wt.W.CurrentUser.Me(ctx) require.NoError(t, err) - stdout, stderr := blackBoxRun(t, root, "bundle", "deploy") + stdout, stderr := blackBoxRun(t, ctx, root, "bundle", "deploy") assert.Equal(t, strings.Join([]string{ fmt.Sprintf("Uploading bundle files to /Workspace/Users/%s/.bundle/%s/files...", currentUser.UserName, uniqueId), "Deploying resources...", @@ -244,18 +234,16 @@ func TestAccDeployBasicBundleLogs(t *testing.T) { assert.Equal(t, "", stdout) } -func TestAccDeployUcVolume(t *testing.T) { +func TestDeployUcVolume(t *testing.T) { ctx, wt := acc.UcWorkspaceTest(t) w := wt.W uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "volume", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "volume", map[string]any{ "unique_id": uniqueId, }) - require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) t.Cleanup(func() { destroyBundle(t, ctx, bundleRoot) @@ -280,9 +268,9 @@ func TestAccDeployUcVolume(t *testing.T) { assert.Equal(t, []catalog.Privilege{catalog.PrivilegeWriteVolume}, grants.PrivilegeAssignments[0].Privileges) // Recreation of the volume without --auto-approve should fail since prompting is not possible - t.Setenv("TERM", "dumb") - 
t.Setenv("BUNDLE_ROOT", bundleRoot) - stdout, stderr, err := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--var=schema_name=${resources.schemas.schema2.name}").Run() + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + ctx = env.Set(ctx, "TERM", "dumb") + stdout, stderr, err := testcli.NewRunner(t, ctx, "bundle", "deploy", "--var=schema_name=${resources.schemas.schema2.name}").Run() assert.Error(t, err) assert.Contains(t, stderr.String(), `This action will result in the deletion or recreation of the following volumes. For managed volumes, the files stored in the volume are also deleted from your @@ -292,9 +280,9 @@ is removed from the catalog, but the underlying files are not deleted: assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed") // Successfully recreate the volume with --auto-approve - t.Setenv("TERM", "dumb") - t.Setenv("BUNDLE_ROOT", bundleRoot) - _, _, err = internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--var=schema_name=${resources.schemas.schema2.name}", "--auto-approve").Run() + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + ctx = env.Set(ctx, "TERM", "dumb") + _, _, err = testcli.NewRunner(t, ctx, "bundle", "deploy", "--var=schema_name=${resources.schemas.schema2.name}", "--auto-approve").Run() assert.NoError(t, err) // Assert the volume is updated successfully diff --git a/internal/bundle/deploy_then_remove_resources_test.go b/integration/bundle/deploy_then_remove_resources_test.go similarity index 67% rename from internal/bundle/deploy_then_remove_resources_test.go rename to integration/bundle/deploy_then_remove_resources_test.go index 66ec5c16a..052d84dd6 100644 --- a/internal/bundle/deploy_then_remove_resources_test.go +++ b/integration/bundle/deploy_then_remove_resources_test.go @@ -1,34 +1,31 @@ -package bundle +package bundle_test import ( "os" "path/filepath" "testing" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" - "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestAccBundleDeployThenRemoveResources(t *testing.T) { +func TestBundleDeployThenRemoveResources(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, }) - require.NoError(t, err) // deploy pipeline - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) // assert pipeline is created pipelineName := "test-bundle-pipeline-" + uniqueId @@ -47,8 +44,7 @@ func TestAccBundleDeployThenRemoveResources(t *testing.T) { require.NoError(t, err) // deploy again - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) // assert pipeline is deleted _, err = w.Pipelines.GetByName(ctx, pipelineName) @@ -59,7 +55,6 @@ func TestAccBundleDeployThenRemoveResources(t *testing.T) { 
assert.ErrorContains(t, err, "does not exist") t.Cleanup(func() { - err = destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) }) } diff --git a/integration/bundle/deploy_to_shared_test.go b/integration/bundle/deploy_to_shared_test.go new file mode 100644 index 000000000..387d3c67a --- /dev/null +++ b/integration/bundle/deploy_to_shared_test.go @@ -0,0 +1,33 @@ +package bundle_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" + "github.com/google/uuid" + "github.com/stretchr/testify/require" +) + +func TestDeployBasicToSharedWorkspacePath(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + + nodeTypeId := testutil.GetCloud(t).NodeTypeID() + uniqueId := uuid.New().String() + + currentUser, err := wt.W.CurrentUser.Me(ctx) + require.NoError(t, err) + + bundleRoot := initTestTemplate(t, ctx, "basic", map[string]any{ + "unique_id": uniqueId, + "node_type_id": nodeTypeId, + "spark_version": defaultSparkVersion, + "root_path": "/Shared/" + currentUser.UserName, + }) + + t.Cleanup(func() { + destroyBundle(wt, ctx, bundleRoot) + }) + + deployBundle(wt, ctx, bundleRoot) +} diff --git a/internal/bundle/deployment_state_test.go b/integration/bundle/deployment_state_test.go similarity index 77% rename from internal/bundle/deployment_state_test.go rename to integration/bundle/deployment_state_test.go index 25f36d4a2..fff1504d2 100644 --- a/internal/bundle/deployment_state_test.go +++ b/integration/bundle/deployment_state_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "os" @@ -7,43 +7,39 @@ import ( "testing" "github.com/databricks/cli/bundle/deploy" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/env" "github.com/google/uuid" "github.com/stretchr/testify/require" ) -func TestAccFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) - +func TestFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W - nodeTypeId := internal.GetNodeTypeId(env) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, "spark_version": "13.3.x-scala2.12", "node_type_id": nodeTypeId, }) - require.NoError(t, err) - t.Setenv("BUNDLE_ROOT", bundleRoot) + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) // Add some test file to the bundle - err = os.WriteFile(filepath.Join(bundleRoot, "test.py"), []byte("print('Hello, World!')"), 0644) + err := os.WriteFile(filepath.Join(bundleRoot, "test.py"), []byte("print('Hello, World!')"), 0o644) require.NoError(t, err) - err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), []byte("print('Hello, World!')"), 0644) + err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), []byte("print('Hello, World!')"), 0o644) require.NoError(t, err) // Add notebook to the bundle - err = os.WriteFile(filepath.Join(bundleRoot, "notebook.py"), []byte("# Databricks notebook source\nHello, World!"), 0644) + err = os.WriteFile(filepath.Join(bundleRoot, "notebook.py"), []byte("# Databricks notebook source\nHello, World!"), 0o644) require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - 
require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) t.Cleanup(func() { destroyBundle(t, ctx, bundleRoot) @@ -79,11 +75,10 @@ func TestAccFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) { require.NoError(t, err) // Modify the content of another file - err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), []byte("print('Modified!')"), 0644) + err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), []byte("print('Modified!')"), 0o644) require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) // Check that removed file is not in workspace anymore _, err = w.Workspace.GetStatusByPath(ctx, path.Join(remoteRoot, "files", "test.py")) diff --git a/internal/bundle/destroy_test.go b/integration/bundle/destroy_test.go similarity index 75% rename from internal/bundle/destroy_test.go rename to integration/bundle/destroy_test.go index baccf4e6f..b69382a58 100644 --- a/internal/bundle/destroy_test.go +++ b/integration/bundle/destroy_test.go @@ -1,42 +1,38 @@ -package bundle +package bundle_test import ( - "errors" "os" "path/filepath" "testing" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" - "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/apierr" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestAccBundleDestroy(t *testing.T) { +func TestBundleDestroy(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, }) - require.NoError(t, err) snapshotsDir := filepath.Join(bundleRoot, ".databricks", "bundle", "default", "sync-snapshots") // Assert the snapshot file does not exist - _, err = os.ReadDir(snapshotsDir) + _, err := os.ReadDir(snapshotsDir) assert.ErrorIs(t, err, os.ErrNotExist) // deploy resources - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) // Assert the snapshot file exists entries, err := os.ReadDir(snapshotsDir) @@ -61,8 +57,7 @@ func TestAccBundleDestroy(t *testing.T) { assert.Equal(t, job.Settings.Name, jobName) // destroy bundle - err = destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) // assert pipeline is deleted _, err = w.Pipelines.GetByName(ctx, pipelineName) @@ -75,11 +70,11 @@ func TestAccBundleDestroy(t *testing.T) { // Assert snapshot file is deleted entries, err = os.ReadDir(snapshotsDir) require.NoError(t, err) - assert.Len(t, entries, 0) + assert.Empty(t, entries) // Assert bundle deployment path is deleted _, err = w.Workspace.GetStatusByPath(ctx, remoteRoot) apiErr := &apierr.APIError{} - assert.True(t, errors.As(err, &apiErr)) + assert.ErrorAs(t, err, &apiErr) assert.Equal(t, "RESOURCE_DOES_NOT_EXIST", apiErr.ErrorCode) } diff --git a/internal/bundle/empty_bundle_test.go b/integration/bundle/empty_bundle_test.go similarity index 57% rename from internal/bundle/empty_bundle_test.go rename to integration/bundle/empty_bundle_test.go index 
36883ae00..2c650cbef 100644 --- a/internal/bundle/empty_bundle_test.go +++ b/integration/bundle/empty_bundle_test.go @@ -1,17 +1,16 @@ -package bundle +package bundle_test import ( - "fmt" "os" "path/filepath" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/google/uuid" "github.com/stretchr/testify/require" ) -func TestAccEmptyBundleDeploy(t *testing.T) { +func TestEmptyBundleDeploy(t *testing.T) { ctx, _ := acc.WorkspaceTest(t) // create empty bundle @@ -19,18 +18,15 @@ func TestAccEmptyBundleDeploy(t *testing.T) { f, err := os.Create(filepath.Join(tmpDir, "databricks.yml")) require.NoError(t, err) - bundleRoot := fmt.Sprintf(`bundle: - name: %s`, uuid.New().String()) + bundleRoot := "bundle:\n name: " + uuid.New().String() _, err = f.WriteString(bundleRoot) require.NoError(t, err) f.Close() // deploy empty bundle - err = deployBundle(t, ctx, tmpDir) - require.NoError(t, err) + deployBundle(t, ctx, tmpDir) t.Cleanup(func() { - err = destroyBundle(t, ctx, tmpDir) - require.NoError(t, err) + destroyBundle(t, ctx, tmpDir) }) } diff --git a/internal/bundle/environments_test.go b/integration/bundle/environments_test.go similarity index 71% rename from internal/bundle/environments_test.go rename to integration/bundle/environments_test.go index 5cffe8857..e0dc91532 100644 --- a/internal/bundle/environments_test.go +++ b/integration/bundle/environments_test.go @@ -1,25 +1,23 @@ -package bundle +package bundle_test import ( "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/google/uuid" "github.com/stretchr/testify/require" ) -func TestAccPythonWheelTaskWithEnvironmentsDeployAndRun(t *testing.T) { +func TestPythonWheelTaskWithEnvironmentsDeployAndRun(t *testing.T) { t.Skip("Skipping test until serverless is enabled") ctx, _ := acc.WorkspaceTest(t) - bundleRoot, err := initTestTemplate(t, ctx, "python_wheel_task_with_environments", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "python_wheel_task_with_environments", map[string]any{ "unique_id": uuid.New().String(), }) - require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) t.Cleanup(func() { destroyBundle(t, ctx, bundleRoot) diff --git a/internal/bundle/generate_job_test.go b/integration/bundle/generate_job_test.go similarity index 69% rename from internal/bundle/generate_job_test.go rename to integration/bundle/generate_job_test.go index 847a7a14e..f3c4c7829 100644 --- a/internal/bundle/generate_job_test.go +++ b/integration/bundle/generate_job_test.go @@ -1,17 +1,18 @@ -package bundle +package bundle_test import ( "context" - "fmt" "os" "path" "path/filepath" + "strconv" "strings" "testing" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/compute" @@ -20,27 +21,26 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccGenerateFromExistingJobAndDeploy(t *testing.T) { +func TestGenerateFromExistingJobAndDeploy(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - gt := &generateJobTest{T: t, w: wt.W} + gt := &generateJobTest{T: wt, w: wt.W} uniqueId := uuid.New().String() -
bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "with_includes", map[string]any{ "unique_id": uniqueId, }) - require.NoError(t, err) jobId := gt.createTestJob(ctx) t.Cleanup(func() { gt.destroyJob(ctx, jobId) }) - t.Setenv("BUNDLE_ROOT", bundleRoot) - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "generate", "job", - "--existing-job-id", fmt.Sprint(jobId), + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + c := testcli.NewRunner(t, ctx, "bundle", "generate", "job", + "--existing-job-id", strconv.FormatInt(jobId, 10), "--config-dir", filepath.Join(bundleRoot, "resources"), "--source-dir", filepath.Join(bundleRoot, "src")) - _, _, err = c.Run() + _, _, err := c.Run() require.NoError(t, err) _, err = os.Stat(filepath.Join(bundleRoot, "src", "test.py")) @@ -55,21 +55,19 @@ func TestAccGenerateFromExistingJobAndDeploy(t *testing.T) { require.NoError(t, err) generatedYaml := string(data) require.Contains(t, generatedYaml, "notebook_task:") - require.Contains(t, generatedYaml, fmt.Sprintf("notebook_path: %s", filepath.Join("..", "src", "test.py"))) + require.Contains(t, generatedYaml, "notebook_path: "+filepath.Join("..", "src", "test.py")) require.Contains(t, generatedYaml, "task_key: test") require.Contains(t, generatedYaml, "new_cluster:") require.Contains(t, generatedYaml, "spark_version: 13.3.x-scala2.12") require.Contains(t, generatedYaml, "num_workers: 1") - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) - err = destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) } type generateJobTest struct { - T *testing.T + T *acc.WorkspaceT w *databricks.WorkspaceClient } @@ -77,17 +75,7 @@ func (gt *generateJobTest) createTestJob(ctx context.Context) int64 { t := gt.T w := gt.w - var nodeTypeId string - switch testutil.GetCloud(t) { - case testutil.AWS: - nodeTypeId = "i3.xlarge" - case testutil.Azure: - nodeTypeId = "Standard_DS4_v2" - case testutil.GCP: - nodeTypeId = "n1-standard-4" - } - - tmpdir := internal.TemporaryWorkspaceDir(t, w) + tmpdir := acc.TemporaryWorkspaceDir(t, "generate-job-") f, err := filer.NewWorkspaceFilesClient(w, tmpdir) require.NoError(t, err) @@ -95,14 +83,14 @@ func (gt *generateJobTest) createTestJob(ctx context.Context) int64 { require.NoError(t, err) resp, err := w.Jobs.Create(ctx, jobs.CreateJob{ - Name: internal.RandomName("generated-job-"), + Name: testutil.RandomName("generated-job-"), Tasks: []jobs.Task{ { TaskKey: "test", NewCluster: &compute.ClusterSpec{ SparkVersion: "13.3.x-scala2.12", NumWorkers: 1, - NodeTypeId: nodeTypeId, + NodeTypeId: testutil.GetCloud(t).NodeTypeID(), SparkConf: map[string]string{ "spark.databricks.enableWsfs": "true", "spark.databricks.hive.metastore.glueCatalog.enabled": "true", diff --git a/internal/bundle/generate_pipeline_test.go b/integration/bundle/generate_pipeline_test.go similarity index 71% rename from internal/bundle/generate_pipeline_test.go rename to integration/bundle/generate_pipeline_test.go index 82467952d..3565ab928 100644 --- a/internal/bundle/generate_pipeline_test.go +++ b/integration/bundle/generate_pipeline_test.go @@ -1,16 +1,17 @@ -package bundle +package bundle_test import ( "context" - "fmt" "os" "path" "path/filepath" "strings" "testing" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" + 
"github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/pipelines" @@ -18,27 +19,26 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccGenerateFromExistingPipelineAndDeploy(t *testing.T) { +func TestGenerateFromExistingPipelineAndDeploy(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - gt := &generatePipelineTest{T: t, w: wt.W} + gt := &generatePipelineTest{T: wt, w: wt.W} uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "with_includes", map[string]any{ "unique_id": uniqueId, }) - require.NoError(t, err) pipelineId, name := gt.createTestPipeline(ctx) t.Cleanup(func() { gt.destroyPipeline(ctx, pipelineId) }) - t.Setenv("BUNDLE_ROOT", bundleRoot) - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "generate", "pipeline", - "--existing-pipeline-id", fmt.Sprint(pipelineId), + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + c := testcli.NewRunner(t, ctx, "bundle", "generate", "pipeline", + "--existing-pipeline-id", pipelineId, "--config-dir", filepath.Join(bundleRoot, "resources"), "--source-dir", filepath.Join(bundleRoot, "src")) - _, _, err = c.Run() + _, _, err := c.Run() require.NoError(t, err) _, err = os.Stat(filepath.Join(bundleRoot, "src", "notebook.py")) @@ -58,25 +58,23 @@ func TestAccGenerateFromExistingPipelineAndDeploy(t *testing.T) { generatedYaml := string(data) // Replace pipeline name - generatedYaml = strings.ReplaceAll(generatedYaml, name, internal.RandomName("copy-generated-pipeline-")) - err = os.WriteFile(fileName, []byte(generatedYaml), 0644) + generatedYaml = strings.ReplaceAll(generatedYaml, name, testutil.RandomName("copy-generated-pipeline-")) + err = os.WriteFile(fileName, []byte(generatedYaml), 0o644) require.NoError(t, err) require.Contains(t, generatedYaml, "libraries:") require.Contains(t, generatedYaml, "- notebook:") - require.Contains(t, generatedYaml, fmt.Sprintf("path: %s", filepath.Join("..", "src", "notebook.py"))) + require.Contains(t, generatedYaml, "path: "+filepath.Join("..", "src", "notebook.py")) require.Contains(t, generatedYaml, "- file:") - require.Contains(t, generatedYaml, fmt.Sprintf("path: %s", filepath.Join("..", "src", "test.py"))) + require.Contains(t, generatedYaml, "path: "+filepath.Join("..", "src", "test.py")) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) - err = destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) } type generatePipelineTest struct { - T *testing.T + T *acc.WorkspaceT w *databricks.WorkspaceClient } @@ -84,7 +82,7 @@ func (gt *generatePipelineTest) createTestPipeline(ctx context.Context) (string, t := gt.T w := gt.w - tmpdir := internal.TemporaryWorkspaceDir(t, w) + tmpdir := acc.TemporaryWorkspaceDir(t, "generate-pipeline-") f, err := filer.NewWorkspaceFilesClient(w, tmpdir) require.NoError(t, err) @@ -94,10 +92,9 @@ func (gt *generatePipelineTest) createTestPipeline(ctx context.Context) (string, err = f.Write(ctx, "test.py", strings.NewReader("print('Hello!')")) require.NoError(t, err) - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - nodeTypeId := internal.GetNodeTypeId(env) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() - name := internal.RandomName("generated-pipeline-") + name := testutil.RandomName("generated-pipeline-") resp, 
err := w.Pipelines.Create(ctx, pipelines.CreatePipeline{ Name: name, Libraries: []pipelines.PipelineLibrary{ diff --git a/internal/bundle/helpers.go b/integration/bundle/helpers_test.go similarity index 50% rename from internal/bundle/helpers.go rename to integration/bundle/helpers_test.go index dd9c841c9..e884cd8c6 100644 --- a/internal/bundle/helpers.go +++ b/integration/bundle/helpers_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "bytes" @@ -9,133 +9,136 @@ import ( "os/exec" "path/filepath" "strings" - "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/flags" + "github.com/databricks/cli/libs/folders" "github.com/databricks/cli/libs/template" - "github.com/databricks/cli/libs/vfs" "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/require" ) const defaultSparkVersion = "13.3.x-snapshot-scala2.12" -func initTestTemplate(t *testing.T, ctx context.Context, templateName string, config map[string]any) (string, error) { +func initTestTemplate(t testutil.TestingT, ctx context.Context, templateName string, config map[string]any) string { bundleRoot := t.TempDir() return initTestTemplateWithBundleRoot(t, ctx, templateName, config, bundleRoot) } -func initTestTemplateWithBundleRoot(t *testing.T, ctx context.Context, templateName string, config map[string]any, bundleRoot string) (string, error) { +func initTestTemplateWithBundleRoot(t testutil.TestingT, ctx context.Context, templateName string, config map[string]any, bundleRoot string) string { templateRoot := filepath.Join("bundles", templateName) - configFilePath, err := writeConfigFile(t, config) - if err != nil { - return "", err - } + configFilePath := writeConfigFile(t, config) ctx = root.SetWorkspaceClient(ctx, nil) - cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "bundles") + cmd := cmdio.NewIO(ctx, flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "bundles") ctx = cmdio.InContext(ctx, cmd) out, err := filer.NewLocalClient(bundleRoot) require.NoError(t, err) err = template.Materialize(ctx, configFilePath, os.DirFS(templateRoot), out) - return bundleRoot, err + require.NoError(t, err) + return bundleRoot } -func writeConfigFile(t *testing.T, config map[string]any) (string, error) { +func writeConfigFile(t testutil.TestingT, config map[string]any) string { bytes, err := json.Marshal(config) - if err != nil { - return "", err - } + require.NoError(t, err) dir := t.TempDir() filepath := filepath.Join(dir, "config.json") t.Log("Configuration for template: ", string(bytes)) - err = os.WriteFile(filepath, bytes, 0644) - return filepath, err + err = os.WriteFile(filepath, bytes, 0o644) + require.NoError(t, err) + return filepath } -func validateBundle(t *testing.T, ctx context.Context, path string) ([]byte, error) { +func validateBundle(t testutil.TestingT, ctx context.Context, path string) ([]byte, error) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "validate", "--output", "json") + c := testcli.NewRunner(t, ctx, "bundle", "validate", "--output", "json") stdout, _, err := c.Run() return stdout.Bytes(), err } -func mustValidateBundle(t *testing.T, ctx context.Context, path string) 
[]byte { +func mustValidateBundle(t testutil.TestingT, ctx context.Context, path string) []byte { data, err := validateBundle(t, ctx, path) require.NoError(t, err) return data } -func unmarshalConfig(t *testing.T, data []byte) *bundle.Bundle { +func unmarshalConfig(t testutil.TestingT, data []byte) *bundle.Bundle { bundle := &bundle.Bundle{} err := json.Unmarshal(data, &bundle.Config) require.NoError(t, err) return bundle } -func deployBundle(t *testing.T, ctx context.Context, path string) error { +func deployBundle(t testutil.TestingT, ctx context.Context, path string) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock", "--auto-approve") + c := testcli.NewRunner(t, ctx, "bundle", "deploy", "--force-lock", "--auto-approve") _, _, err := c.Run() - return err + require.NoError(t, err) } -func deployBundleWithArgs(t *testing.T, ctx context.Context, path string, args ...string) (string, string, error) { +func deployBundleWithArgsErr(t testutil.TestingT, ctx context.Context, path string, args ...string) (string, string, error) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) args = append([]string{"bundle", "deploy"}, args...) - c := internal.NewCobraTestRunnerWithContext(t, ctx, args...) + c := testcli.NewRunner(t, ctx, args...) stdout, stderr, err := c.Run() return stdout.String(), stderr.String(), err } -func deployBundleWithFlags(t *testing.T, ctx context.Context, path string, flags []string) error { +func deployBundleWithArgs(t testutil.TestingT, ctx context.Context, path string, args ...string) (string, string) { + stdout, stderr, err := deployBundleWithArgsErr(t, ctx, path, args...) + require.NoError(t, err) + return stdout, stderr +} + +func deployBundleWithFlags(t testutil.TestingT, ctx context.Context, path string, flags []string) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) args := []string{"bundle", "deploy", "--force-lock"} args = append(args, flags...) - c := internal.NewCobraTestRunnerWithContext(t, ctx, args...) + c := testcli.NewRunner(t, ctx, args...) _, _, err := c.Run() - return err + require.NoError(t, err) } -func runResource(t *testing.T, ctx context.Context, path string, key string) (string, error) { +func runResource(t testutil.TestingT, ctx context.Context, path, key string) (string, error) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) ctx = cmdio.NewContext(ctx, cmdio.Default()) - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "run", key) + c := testcli.NewRunner(t, ctx, "bundle", "run", key) stdout, _, err := c.Run() return stdout.String(), err } -func runResourceWithParams(t *testing.T, ctx context.Context, path string, key string, params ...string) (string, error) { +func runResourceWithParams(t testutil.TestingT, ctx context.Context, path, key string, params ...string) (string, error) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) ctx = cmdio.NewContext(ctx, cmdio.Default()) args := make([]string, 0) args = append(args, "bundle", "run", key) args = append(args, params...) - c := internal.NewCobraTestRunnerWithContext(t, ctx, args...) + c := testcli.NewRunner(t, ctx, args...) 
stdout, _, err := c.Run() return stdout.String(), err } -func destroyBundle(t *testing.T, ctx context.Context, path string) error { +func destroyBundle(t testutil.TestingT, ctx context.Context, path string) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "destroy", "--auto-approve") + c := testcli.NewRunner(t, ctx, "bundle", "destroy", "--auto-approve") _, _, err := c.Run() - return err + require.NoError(t, err) } -func getBundleRemoteRootPath(w *databricks.WorkspaceClient, t *testing.T, uniqueId string) string { +func getBundleRemoteRootPath(w *databricks.WorkspaceClient, t testutil.TestingT, uniqueId string) string { // Compute root path for the bundle deployment me, err := w.CurrentUser.Me(context.Background()) require.NoError(t, err) @@ -143,16 +146,19 @@ func getBundleRemoteRootPath(w *databricks.WorkspaceClient, t *testing.T, unique return root } -func blackBoxRun(t *testing.T, root string, args ...string) (stdout string, stderr string) { - cwd := vfs.MustNew(".") - gitRoot, err := vfs.FindLeafInTree(cwd, ".git") +func blackBoxRun(t testutil.TestingT, ctx context.Context, root string, args ...string) (stdout, stderr string) { + gitRoot, err := folders.FindDirWithLeaf(".", ".git") require.NoError(t, err) - t.Setenv("BUNDLE_ROOT", root) - // Create the command cmd := exec.Command("go", append([]string{"run", "main.go"}, args...)...) - cmd.Dir = gitRoot.Native() + cmd.Dir = gitRoot + + // Configure the environment + ctx = env.Set(ctx, "BUNDLE_ROOT", root) + for key, value := range env.All(ctx) { + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", key, value)) + } // Create buffers to capture output var outBuffer, errBuffer bytes.Buffer diff --git a/integration/bundle/init_default_python_test.go b/integration/bundle/init_default_python_test.go new file mode 100644 index 000000000..c93e6b50b --- /dev/null +++ b/integration/bundle/init_default_python_test.go @@ -0,0 +1,133 @@ +package bundle_test + +import ( + "encoding/json" + "os" + "os/exec" + "path/filepath" + "testing" + + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/python/pythontest" + "github.com/databricks/cli/libs/testdiff" + "github.com/stretchr/testify/require" +) + +var pythonVersions = []string{ + "3.8", + "3.9", + "3.10", + "3.11", + "3.12", + "3.13", +} + +var pythonVersionsShort = []string{ + "3.9", + "3.12", +} + +var extraInstalls = map[string][]string{ + "3.12": {"setuptools"}, + "3.13": {"setuptools"}, +} + +func TestDefaultPython(t *testing.T) { + versions := pythonVersions + if testing.Short() { + versions = pythonVersionsShort + } + + for _, pythonVersion := range versions { + t.Run(pythonVersion, func(t *testing.T) { + testDefaultPython(t, pythonVersion) + }) + } +} + +func testDefaultPython(t *testing.T, pythonVersion string) { + ctx, wt := acc.WorkspaceTest(t) + + uniqueProjectId := testutil.RandomName("") + ctx, replacements := testdiff.WithReplacementsMap(ctx) + replacements.Set(uniqueProjectId, "$UNIQUE_PRJ") + + user, err := wt.W.CurrentUser.Me(ctx) + require.NoError(t, err) + require.NotNil(t, user) + testdiff.PrepareReplacementsUser(t, replacements, *user) + testdiff.PrepareReplacements(t, replacements, wt.W) + + tmpDir := t.TempDir() + testutil.Chdir(t, tmpDir) + + opts := pythontest.VenvOpts{ + PythonVersion: pythonVersion, + Dir: tmpDir, + } + + pythontest.RequireActivatedPythonEnv(t, ctx, &opts) + extras, ok := 
extraInstalls[pythonVersion] + if ok { + args := append([]string{"pip", "install", "--python", opts.PythonExe}, extras...) + cmd := exec.Command("uv", args...) + require.NoError(t, cmd.Run()) + } + + projectName := "project_name_" + uniqueProjectId + + initConfig := map[string]string{ + "project_name": projectName, + "include_notebook": "yes", + "include_python": "yes", + "include_dlt": "yes", + } + b, err := json.Marshal(initConfig) + require.NoError(t, err) + err = os.WriteFile(filepath.Join(tmpDir, "config.json"), b, 0o644) + require.NoError(t, err) + + testcli.AssertOutput( + t, + ctx, + []string{"bundle", "init", "default-python", "--config-file", "config.json"}, + testutil.TestData("testdata/default_python/bundle_init.txt"), + ) + testutil.Chdir(t, projectName) + + t.Cleanup(func() { + // Delete the stack + testcli.RequireSuccessfulRun(t, ctx, "bundle", "destroy", "--auto-approve") + }) + + testcli.AssertOutput( + t, + ctx, + []string{"bundle", "validate"}, + testutil.TestData("testdata/default_python/bundle_validate.txt"), + ) + testcli.AssertOutput( + t, + ctx, + []string{"bundle", "deploy"}, + testutil.TestData("testdata/default_python/bundle_deploy.txt"), + ) + + testcli.AssertOutputJQ( + t, + ctx, + []string{"bundle", "summary", "--output", "json"}, + testutil.TestData("testdata/default_python/bundle_summary.txt"), + []string{ + "/bundle/terraform/exec_path", + "/resources/jobs/project_name_$UNIQUE_PRJ_job/email_notifications", + "/resources/jobs/project_name_$UNIQUE_PRJ_job/job_clusters/0/new_cluster/node_type_id", + "/resources/jobs/project_name_$UNIQUE_PRJ_job/url", + "/resources/pipelines/project_name_$UNIQUE_PRJ_pipeline/catalog", + "/resources/pipelines/project_name_$UNIQUE_PRJ_pipeline/url", + "/workspace/current_user", + }, + ) +} diff --git a/internal/init_test.go b/integration/bundle/init_test.go similarity index 68% rename from internal/init_test.go rename to integration/bundle/init_test.go index 25bfc19da..87a3e30e5 100644 --- a/internal/init_test.go +++ b/integration/bundle/init_test.go @@ -1,4 +1,4 @@ -package internal +package bundle_test import ( "context" @@ -11,18 +11,18 @@ import ( "testing" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/iamutil" - "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestAccBundleInitErrorOnUnknownFields(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - +func TestBundleInitErrorOnUnknownFields(t *testing.T) { + ctx := context.Background() tmpDir := t.TempDir() - _, _, err := RequireErrorRun(t, "bundle", "init", "./testdata/init/field-does-not-exist", "--output-dir", tmpDir) + _, _, err := testcli.RequireErrorRun(t, ctx, "bundle", "init", "./testdata/init/field-does-not-exist", "--output-dir", tmpDir) assert.EqualError(t, err, "failed to compute file content for bar.tmpl. variable \"does_not_exist\" not defined") } @@ -38,17 +38,15 @@ func TestAccBundleInitErrorOnUnknownFields(t *testing.T) { // 2. While rare and to be avoided if possible, the CLI reserves the right to // make changes that can break the MLOps Stacks DAB. In which case we should // skip this test until the MLOps Stacks DAB is updated to work again. 
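The helpers in integration/bundle/helpers_test.go now fail the test via require.NoError internally instead of returning errors, which is why the call sites in the tests that follow drop their error handling. Below is a minimal sketch, not part of this change, of how a test composes the reworked helpers under the new signatures; the resource key "example_job" is a placeholder, and the sketch assumes the "basic" template accepts the same variables it is given in local_state_staleness_test.go.

package bundle_test

import (
	"testing"

	"github.com/databricks/cli/integration/internal/acc"
	"github.com/databricks/cli/internal/testutil"
	"github.com/google/uuid"
	"github.com/stretchr/testify/require"
)

// Sketch only: composes the helpers defined in helpers_test.go.
func TestExampleBundleLifecycle(t *testing.T) {
	ctx, _ := acc.WorkspaceTest(t)

	// initTestTemplate materializes the template into a temp dir and returns its root;
	// it fails the test itself if materialization errors.
	bundleRoot := initTestTemplate(t, ctx, "basic", map[string]any{
		"unique_id":     uuid.New().String(),
		"node_type_id":  testutil.GetCloud(t).NodeTypeID(),
		"spark_version": defaultSparkVersion,
	})

	// deployBundle and destroyBundle call require.NoError internally.
	deployBundle(t, ctx, bundleRoot)
	t.Cleanup(func() {
		destroyBundle(t, ctx, bundleRoot)
	})

	// runResource still returns an error so callers can assert on the output.
	out, err := runResource(t, ctx, bundleRoot, "example_job") // placeholder key
	require.NoError(t, err)
	require.NotEmpty(t, out)
}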
-func TestAccBundleInitOnMlopsStacks(t *testing.T) { - t.Parallel() - env := testutil.GetCloud(t).String() +func TestBundleInitOnMlopsStacks(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W tmpDir1 := t.TempDir() tmpDir2 := t.TempDir() - w, err := databricks.NewWorkspaceClient(&databricks.Config{}) - require.NoError(t, err) - - projectName := RandomName("project_name_") + projectName := testutil.RandomName("project_name_") + env := testutil.GetCloud(t).String() // Create a config file with the project name and root dir initConfig := map[string]string{ @@ -59,29 +57,30 @@ func TestAccBundleInitOnMlopsStacks(t *testing.T) { } b, err := json.Marshal(initConfig) require.NoError(t, err) - os.WriteFile(filepath.Join(tmpDir1, "config.json"), b, 0644) + err = os.WriteFile(filepath.Join(tmpDir1, "config.json"), b, 0o644) + require.NoError(t, err) // Run bundle init assert.NoFileExists(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md")) - RequireSuccessfulRun(t, "bundle", "init", "mlops-stacks", "--output-dir", tmpDir2, "--config-file", filepath.Join(tmpDir1, "config.json")) + testcli.RequireSuccessfulRun(t, ctx, "bundle", "init", "mlops-stacks", "--output-dir", tmpDir2, "--config-file", filepath.Join(tmpDir1, "config.json")) // Assert that the README.md file was created - assert.FileExists(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md")) - assertLocalFileContents(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md"), fmt.Sprintf("# %s", projectName)) + contents := testutil.ReadFile(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md")) + assert.Contains(t, contents, "# "+projectName) // Validate the stack testutil.Chdir(t, filepath.Join(tmpDir2, "repo_name", projectName)) - RequireSuccessfulRun(t, "bundle", "validate") + testcli.RequireSuccessfulRun(t, ctx, "bundle", "validate") // Deploy the stack - RequireSuccessfulRun(t, "bundle", "deploy") + testcli.RequireSuccessfulRun(t, ctx, "bundle", "deploy") t.Cleanup(func() { // Delete the stack - RequireSuccessfulRun(t, "bundle", "destroy", "--auto-approve") + testcli.RequireSuccessfulRun(t, ctx, "bundle", "destroy", "--auto-approve") }) // Get summary of the bundle deployment - stdout, _ := RequireSuccessfulRun(t, "bundle", "summary", "--output", "json") + stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "bundle", "summary", "--output", "json") summary := &config.Root{} err = json.Unmarshal(stdout.Bytes(), summary) require.NoError(t, err) @@ -100,24 +99,23 @@ func TestAccBundleInitOnMlopsStacks(t *testing.T) { assert.Contains(t, job.Settings.Name, fmt.Sprintf("dev-%s-batch-inference-job", projectName)) } -func TestAccBundleInitHelpers(t *testing.T) { - env := GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) +func TestBundleInitHelpers(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W - w, err := databricks.NewWorkspaceClient(&databricks.Config{}) - require.NoError(t, err) - - me, err := w.CurrentUser.Me(context.Background()) + me, err := w.CurrentUser.Me(ctx) require.NoError(t, err) var smallestNode string - switch env { - case "azure": + switch testutil.GetCloud(t) { + case testutil.Azure: smallestNode = "Standard_D3_v2" - case "gcp": + case testutil.GCP: smallestNode = "n1-standard-4" - default: + case testutil.AWS: smallestNode = "i3.xlarge" + default: + t.Fatal("Unknown cloud environment") } tests := []struct { @@ -151,17 +149,18 @@ func TestAccBundleInitHelpers(t *testing.T) { tmpDir := t.TempDir() tmpDir2 := t.TempDir() - err := os.Mkdir(filepath.Join(tmpDir, "template"), 
0755) + err := os.Mkdir(filepath.Join(tmpDir, "template"), 0o755) require.NoError(t, err) - err = os.WriteFile(filepath.Join(tmpDir, "template", "foo.txt.tmpl"), []byte(test.funcName), 0644) + err = os.WriteFile(filepath.Join(tmpDir, "template", "foo.txt.tmpl"), []byte(test.funcName), 0o644) require.NoError(t, err) - err = os.WriteFile(filepath.Join(tmpDir, "databricks_template_schema.json"), []byte("{}"), 0644) + err = os.WriteFile(filepath.Join(tmpDir, "databricks_template_schema.json"), []byte("{}"), 0o644) require.NoError(t, err) // Run bundle init. - RequireSuccessfulRun(t, "bundle", "init", tmpDir, "--output-dir", tmpDir2) + testcli.RequireSuccessfulRun(t, ctx, "bundle", "init", tmpDir, "--output-dir", tmpDir2) // Assert that the helper function was correctly computed. - assertLocalFileContents(t, filepath.Join(tmpDir2, "foo.txt"), test.expected) + contents := testutil.ReadFile(t, filepath.Join(tmpDir2, "foo.txt")) + assert.Contains(t, contents, test.expected) } } diff --git a/internal/bundle/job_metadata_test.go b/integration/bundle/job_metadata_test.go similarity index 82% rename from internal/bundle/job_metadata_test.go rename to integration/bundle/job_metadata_test.go index 21f1086ae..a7290c6e3 100644 --- a/internal/bundle/job_metadata_test.go +++ b/integration/bundle/job_metadata_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "context" @@ -10,36 +10,32 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/metadata" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" - "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestAccJobsMetadataFile(t *testing.T) { +func TestJobsMetadataFile(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "job_metadata", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "job_metadata", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, }) - require.NoError(t, err) // deploy bundle - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) // Cleanup the deployed bundle t.Cleanup(func() { - err = destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) }) // assert job 1 is created diff --git a/internal/bundle/local_state_staleness_test.go b/integration/bundle/local_state_staleness_test.go similarity index 68% rename from internal/bundle/local_state_staleness_test.go rename to integration/bundle/local_state_staleness_test.go index d11234667..398481504 100644 --- a/internal/bundle/local_state_staleness_test.go +++ b/integration/bundle/local_state_staleness_test.go @@ -1,12 +1,11 @@ -package bundle +package bundle_test import ( "context" "testing" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" - "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/listing" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/google/uuid" @@ 
-14,7 +13,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccLocalStateStaleness(t *testing.T) { +func TestLocalStateStaleness(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W @@ -25,19 +24,17 @@ func TestAccLocalStateStaleness(t *testing.T) { // Because of deploy (2), the locally cached state of bundle instance A should be stale. // Then for deploy (3), it must use the remote state over the stale local state. - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() initialize := func() string { - root, err := initTestTemplate(t, ctx, "basic", map[string]any{ + root := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, }) - require.NoError(t, err) t.Cleanup(func() { - err = destroyBundle(t, ctx, root) - require.NoError(t, err) + destroyBundle(t, ctx, root) }) return root @@ -49,16 +46,13 @@ func TestAccLocalStateStaleness(t *testing.T) { bundleB := initialize() // 1) Deploy bundle A - err = deployBundle(t, ctx, bundleA) - require.NoError(t, err) + deployBundle(t, ctx, bundleA) // 2) Deploy bundle B - err = deployBundle(t, ctx, bundleB) - require.NoError(t, err) + deployBundle(t, ctx, bundleB) // 3) Deploy bundle A again - err = deployBundle(t, ctx, bundleA) - require.NoError(t, err) + deployBundle(t, ctx, bundleA) // Assert that there is only a single job in the workspace corresponding to this bundle. iter := w.Jobs.List(context.Background(), jobs.ListJobsRequest{ diff --git a/internal/bundle/python_wheel_test.go b/integration/bundle/python_wheel_test.go similarity index 66% rename from internal/bundle/python_wheel_test.go rename to integration/bundle/python_wheel_test.go index 846f14177..62846f7b5 100644 --- a/internal/bundle/python_wheel_test.go +++ b/integration/bundle/python_wheel_test.go @@ -1,37 +1,39 @@ -package bundle +package bundle_test import ( "testing" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/env" "github.com/google/uuid" "github.com/stretchr/testify/require" ) -func runPythonWheelTest(t *testing.T, templateName string, sparkVersion string, pythonWheelWrapper bool) { +func runPythonWheelTest(t *testing.T, templateName, sparkVersion string, pythonWheelWrapper bool) { ctx, _ := acc.WorkspaceTest(t) - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() instancePoolId := env.Get(ctx, "TEST_INSTANCE_POOL_ID") - bundleRoot, err := initTestTemplate(t, ctx, templateName, map[string]any{ + bundleRoot := initTestTemplate(t, ctx, templateName, map[string]any{ "node_type_id": nodeTypeId, "unique_id": uuid.New().String(), "spark_version": sparkVersion, "python_wheel_wrapper": pythonWheelWrapper, "instance_pool_id": instancePoolId, }) - require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) t.Cleanup(func() { destroyBundle(t, ctx, bundleRoot) }) + if testing.Short() { + t.Log("Skip the job run in short mode") + return + } + out, err := runResource(t, ctx, bundleRoot, "some_other_job") require.NoError(t, err) require.Contains(t, out, "Hello from my func") @@ -45,18 +47,16 @@ func runPythonWheelTest(t *testing.T, templateName string, sparkVersion string, require.Contains(t, out, 
"['my_test_code', 'param1', 'param2']") } -func TestAccPythonWheelTaskDeployAndRunWithoutWrapper(t *testing.T) { +func TestPythonWheelTaskDeployAndRunWithoutWrapper(t *testing.T) { runPythonWheelTest(t, "python_wheel_task", "13.3.x-snapshot-scala2.12", false) } -func TestAccPythonWheelTaskDeployAndRunWithWrapper(t *testing.T) { +func TestPythonWheelTaskDeployAndRunWithWrapper(t *testing.T) { runPythonWheelTest(t, "python_wheel_task", "12.2.x-scala2.12", true) } -func TestAccPythonWheelTaskDeployAndRunOnInteractiveCluster(t *testing.T) { - _, wt := acc.WorkspaceTest(t) - - if testutil.IsAWSCloud(wt.T) { +func TestPythonWheelTaskDeployAndRunOnInteractiveCluster(t *testing.T) { + if testutil.GetCloud(t) == testutil.AWS { t.Skip("Skipping test for AWS cloud because it is not permitted to create clusters") } diff --git a/internal/bundle/spark_jar_test.go b/integration/bundle/spark_jar_test.go similarity index 78% rename from internal/bundle/spark_jar_test.go rename to integration/bundle/spark_jar_test.go index 4b469617c..cbdf5a00c 100644 --- a/internal/bundle/spark_jar_test.go +++ b/integration/bundle/spark_jar_test.go @@ -1,23 +1,21 @@ -package bundle +package bundle_test import ( "context" "testing" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/env" "github.com/google/uuid" "github.com/stretchr/testify/require" ) -func runSparkJarTestCommon(t *testing.T, ctx context.Context, sparkVersion string, artifactPath string) { - cloudEnv := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - nodeTypeId := internal.GetNodeTypeId(cloudEnv) +func runSparkJarTestCommon(t *testing.T, ctx context.Context, sparkVersion, artifactPath string) { + nodeTypeId := testutil.GetCloud(t).NodeTypeID() tmpDir := t.TempDir() instancePoolId := env.Get(ctx, "TEST_INSTANCE_POOL_ID") - bundleRoot, err := initTestTemplateWithBundleRoot(t, ctx, "spark_jar_task", map[string]any{ + bundleRoot := initTestTemplateWithBundleRoot(t, ctx, "spark_jar_task", map[string]any{ "node_type_id": nodeTypeId, "unique_id": uuid.New().String(), "spark_version": sparkVersion, @@ -25,15 +23,18 @@ func runSparkJarTestCommon(t *testing.T, ctx context.Context, sparkVersion strin "artifact_path": artifactPath, "instance_pool_id": instancePoolId, }, tmpDir) - require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) t.Cleanup(func() { destroyBundle(t, ctx, bundleRoot) }) + if testing.Short() { + t.Log("Skip the job run in short mode") + return + } + out, err := runResource(t, ctx, bundleRoot, "jar_job") require.NoError(t, err) require.Contains(t, out, "Hello from Jar!") @@ -41,7 +42,7 @@ func runSparkJarTestCommon(t *testing.T, ctx context.Context, sparkVersion strin func runSparkJarTestFromVolume(t *testing.T, sparkVersion string) { ctx, wt := acc.UcWorkspaceTest(t) - volumePath := internal.TemporaryUcVolume(t, wt.W) + volumePath := acc.TemporaryVolume(wt) ctx = env.Set(ctx, "DATABRICKS_BUNDLE_TARGET", "volume") runSparkJarTestCommon(t, ctx, sparkVersion, volumePath) } @@ -52,8 +53,7 @@ func runSparkJarTestFromWorkspace(t *testing.T, sparkVersion string) { runSparkJarTestCommon(t, ctx, sparkVersion, "n/a") } -func TestAccSparkJarTaskDeployAndRunOnVolumes(t *testing.T) { - internal.GetEnvOrSkipTest(t, "CLOUD_ENV") +func TestSparkJarTaskDeployAndRunOnVolumes(t *testing.T) { testutil.RequireJDK(t, context.Background(), "1.8.0") 
// Failure on earlier DBR versions: @@ -76,8 +76,7 @@ func TestAccSparkJarTaskDeployAndRunOnVolumes(t *testing.T) { } } -func TestAccSparkJarTaskDeployAndRunOnWorkspace(t *testing.T) { - internal.GetEnvOrSkipTest(t, "CLOUD_ENV") +func TestSparkJarTaskDeployAndRunOnWorkspace(t *testing.T) { testutil.RequireJDK(t, context.Background(), "1.8.0") // Failure on earlier DBR versions: diff --git a/integration/bundle/testdata/default_python/bundle_deploy.txt b/integration/bundle/testdata/default_python/bundle_deploy.txt new file mode 100644 index 000000000..eef0b79b3 --- /dev/null +++ b/integration/bundle/testdata/default_python/bundle_deploy.txt @@ -0,0 +1,6 @@ +Building project_name_$UNIQUE_PRJ... +Uploading project_name_$UNIQUE_PRJ-0.0.1+.-py3-none-any.whl... +Uploading bundle files to /Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/files... +Deploying resources... +Updating deployment state... +Deployment complete! diff --git a/integration/bundle/testdata/default_python/bundle_init.txt b/integration/bundle/testdata/default_python/bundle_init.txt new file mode 100644 index 000000000..6cfc32f98 --- /dev/null +++ b/integration/bundle/testdata/default_python/bundle_init.txt @@ -0,0 +1,8 @@ + +Welcome to the default Python template for Databricks Asset Bundles! +Workspace to use (auto-detected, edit in 'project_name_$UNIQUE_PRJ/databricks.yml'): https://$DATABRICKS_HOST + +✨ Your new project has been created in the 'project_name_$UNIQUE_PRJ' directory! + +Please refer to the README.md file for "getting started" instructions. +See also the documentation at https://docs.databricks.com/dev-tools/bundles/index.html. diff --git a/integration/bundle/testdata/default_python/bundle_summary.txt b/integration/bundle/testdata/default_python/bundle_summary.txt new file mode 100644 index 000000000..a0bcfdbc8 --- /dev/null +++ b/integration/bundle/testdata/default_python/bundle_summary.txt @@ -0,0 +1,186 @@ +{ + "bundle": { + "name": "project_name_$UNIQUE_PRJ", + "target": "dev", + "environment": "dev", + "terraform": { + "exec_path": "/tmp/.../terraform" + }, + "git": { + "bundle_root_path": ".", + "inferred": true + }, + "mode": "development", + "deployment": { + "lock": { + "enabled": false + } + }, + "uuid": "" + }, + "include": [ + "resources/project_name_$UNIQUE_PRJ.job.yml", + "resources/project_name_$UNIQUE_PRJ.pipeline.yml" + ], + "workspace": { + "host": "https://$DATABRICKS_HOST", + "current_user": { + "active": true, + "displayName": "$USERNAME", + "emails": [ + { + "primary": true, + "type": "work", + "value": "$USERNAME" + } + ], + "groups": [ + { + "$ref": "Groups/$USER.Groups[0]", + "display": "team.engineering", + "type": "direct", + "value": "$USER.Groups[0]" + } + ], + "id": "$USER.Id", + "name": { + "familyName": "$USERNAME", + "givenName": "$USERNAME" + }, + "schemas": [ + "urn:ietf:params:scim:schemas:core:2.0:User", + "urn:ietf:params:scim:schemas:extension:workspace:2.0:User" + ], + "short_name": "$USERNAME", + "userName": "$USERNAME" + }, + "root_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev", + "file_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/files", + "resource_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/resources", + "artifact_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/artifacts", + "state_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/state" + }, + "resources": { + "jobs": { + "project_name_$UNIQUE_PRJ_job": { + "deployment": { + 
"kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/state/metadata.json" + }, + "edit_mode": "UI_LOCKED", + "email_notifications": { + "on_failure": [ + "$USERNAME" + ] + }, + "format": "MULTI_TASK", + "id": "", + "job_clusters": [ + { + "job_cluster_key": "job_cluster", + "new_cluster": { + "autoscale": { + "max_workers": 4, + "min_workers": 1 + }, + "node_type_id": "i3.xlarge", + "spark_version": "15.4.x-scala2.12" + } + } + ], + "max_concurrent_runs": 4, + "name": "[dev $USERNAME] project_name_$UNIQUE_PRJ_job", + "queue": { + "enabled": true + }, + "tags": { + "dev": "$USERNAME" + }, + "tasks": [ + { + "job_cluster_key": "job_cluster", + "notebook_task": { + "notebook_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/files/src/notebook" + }, + "task_key": "notebook_task" + }, + { + "depends_on": [ + { + "task_key": "notebook_task" + } + ], + "pipeline_task": { + "pipeline_id": "${resources.pipelines.project_name_$UNIQUE_PRJ_pipeline.id}" + }, + "task_key": "refresh_pipeline" + }, + { + "depends_on": [ + { + "task_key": "refresh_pipeline" + } + ], + "job_cluster_key": "job_cluster", + "libraries": [ + { + "whl": "dist/*.whl" + } + ], + "python_wheel_task": { + "entry_point": "main", + "package_name": "project_name_$UNIQUE_PRJ" + }, + "task_key": "main_task" + } + ], + "trigger": { + "pause_status": "PAUSED", + "periodic": { + "interval": 1, + "unit": "DAYS" + } + }, + "url": "https://$DATABRICKS_HOST/jobs/?o=" + } + }, + "pipelines": { + "project_name_$UNIQUE_PRJ_pipeline": { + "catalog": "main", + "configuration": { + "bundle.sourcePath": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/files/src" + }, + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/state/metadata.json" + }, + "development": true, + "id": "", + "libraries": [ + { + "notebook": { + "path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/files/src/dlt_pipeline" + } + } + ], + "name": "[dev $USERNAME] project_name_$UNIQUE_PRJ_pipeline", + "target": "project_name_$UNIQUE_PRJ_dev", + "url": "https://$DATABRICKS_HOST/pipelines/?o=" + } + } + }, + "sync": { + "paths": [ + "." + ] + }, + "presets": { + "name_prefix": "[dev $USERNAME] ", + "pipelines_development": true, + "trigger_pause_status": "PAUSED", + "jobs_max_concurrent_runs": 4, + "tags": { + "dev": "$USERNAME" + } + } +} \ No newline at end of file diff --git a/integration/bundle/testdata/default_python/bundle_validate.txt b/integration/bundle/testdata/default_python/bundle_validate.txt new file mode 100644 index 000000000..88a5fdd18 --- /dev/null +++ b/integration/bundle/testdata/default_python/bundle_validate.txt @@ -0,0 +1,8 @@ +Name: project_name_$UNIQUE_PRJ +Target: dev +Workspace: + Host: https://$DATABRICKS_HOST + User: $USERNAME + Path: /Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev + +Validation OK! 
diff --git a/internal/testdata/init/field-does-not-exist/databricks_template_schema.json b/integration/bundle/testdata/init/field-does-not-exist/databricks_template_schema.json similarity index 100% rename from internal/testdata/init/field-does-not-exist/databricks_template_schema.json rename to integration/bundle/testdata/init/field-does-not-exist/databricks_template_schema.json diff --git a/internal/testdata/init/field-does-not-exist/template/bar.tmpl b/integration/bundle/testdata/init/field-does-not-exist/template/bar.tmpl similarity index 100% rename from internal/testdata/init/field-does-not-exist/template/bar.tmpl rename to integration/bundle/testdata/init/field-does-not-exist/template/bar.tmpl diff --git a/internal/bundle/validate_test.go b/integration/bundle/validate_test.go similarity index 90% rename from internal/bundle/validate_test.go rename to integration/bundle/validate_test.go index 18da89e4c..2dd8ada67 100644 --- a/internal/bundle/validate_test.go +++ b/integration/bundle/validate_test.go @@ -1,8 +1,9 @@ -package bundle +package bundle_test import ( "context" "encoding/json" + "path/filepath" "testing" "github.com/databricks/cli/internal/testutil" @@ -12,11 +13,9 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccBundleValidate(t *testing.T) { - testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") - +func TestBundleValidate(t *testing.T) { tmpDir := t.TempDir() - testutil.WriteFile(t, + testutil.WriteFile(t, filepath.Join(tmpDir, "databricks.yml"), ` bundle: name: "foobar" @@ -33,7 +32,7 @@ resources: inner_loop: name: inner loop -`, tmpDir, "databricks.yml") +`) ctx := context.Background() stdout, err := validateBundle(t, ctx, tmpDir) diff --git a/integration/cmd/alerts/alerts_test.go b/integration/cmd/alerts/alerts_test.go new file mode 100644 index 000000000..ca1719813 --- /dev/null +++ b/integration/cmd/alerts/alerts_test.go @@ -0,0 +1,15 @@ +package alerts_test + +import ( + "context" + "testing" + + "github.com/databricks/cli/internal/testcli" + "github.com/stretchr/testify/assert" +) + +func TestAlertsCreateErrWhenNoArguments(t *testing.T) { + ctx := context.Background() + _, _, err := testcli.RequireErrorRun(t, ctx, "alerts-legacy", "create") + assert.Equal(t, "please provide command input in JSON format by specifying the --json flag", err.Error()) +} diff --git a/integration/cmd/api/api_test.go b/integration/cmd/api/api_test.go new file mode 100644 index 000000000..4cb9b1737 --- /dev/null +++ b/integration/cmd/api/api_test.go @@ -0,0 +1,56 @@ +package api_test + +import ( + "context" + "encoding/json" + "fmt" + "path" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + _ "github.com/databricks/cli/cmd/api" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" +) + +func TestApiGet(t *testing.T) { + ctx := context.Background() + + stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "api", "get", "/api/2.0/preview/scim/v2/Me") + + // Deserialize SCIM API response. + var out map[string]any + err := json.Unmarshal(stdout.Bytes(), &out) + require.NoError(t, err) + + // Assert that the output somewhat makes sense for the SCIM API. 
+ assert.Equal(t, true, out["active"]) + assert.NotNil(t, out["id"]) +} + +func TestApiPost(t *testing.T) { + ctx := context.Background() + + if testutil.GetCloud(t) == testutil.GCP { + t.Skip("DBFS REST API is disabled on gcp") + } + + dbfsPath := path.Join("/tmp/databricks/integration", testutil.RandomName("api-post")) + requestPath := filepath.Join(t.TempDir(), "body.json") + testutil.WriteFile(t, requestPath, fmt.Sprintf(`{ + "path": "%s" + }`, dbfsPath)) + + // Post to mkdir + { + testcli.RequireSuccessfulRun(t, ctx, "api", "post", "--json=@"+requestPath, "/api/2.0/dbfs/mkdirs") + } + + // Post to delete + { + testcli.RequireSuccessfulRun(t, ctx, "api", "post", "--json=@"+requestPath, "/api/2.0/dbfs/delete") + } +} diff --git a/internal/auth_describe_test.go b/integration/cmd/auth/describe_test.go similarity index 56% rename from internal/auth_describe_test.go rename to integration/cmd/auth/describe_test.go index 90b5d6801..f592bc276 100644 --- a/internal/auth_describe_test.go +++ b/integration/cmd/auth/describe_test.go @@ -1,39 +1,41 @@ -package internal +package auth_test import ( "context" - "fmt" "testing" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/require" ) func TestAuthDescribeSuccess(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Skipf("Skipping because of https://github.com/databricks/cli/issues/2010") - stdout, _ := RequireSuccessfulRun(t, "auth", "describe") + ctx := context.Background() + stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "auth", "describe") outStr := stdout.String() w, err := databricks.NewWorkspaceClient(&databricks.Config{}) require.NoError(t, err) require.NotEmpty(t, outStr) - require.Contains(t, outStr, fmt.Sprintf("Host: %s", w.Config.Host)) + require.Contains(t, outStr, "Host: "+w.Config.Host) me, err := w.CurrentUser.Me(context.Background()) require.NoError(t, err) - require.Contains(t, outStr, fmt.Sprintf("User: %s", me.UserName)) - require.Contains(t, outStr, fmt.Sprintf("Authenticated with: %s", w.Config.AuthType)) + require.Contains(t, outStr, "User: "+me.UserName) + require.Contains(t, outStr, "Authenticated with: "+w.Config.AuthType) require.Contains(t, outStr, "Current configuration:") - require.Contains(t, outStr, fmt.Sprintf("✓ host: %s", w.Config.Host)) + require.Contains(t, outStr, "✓ host: "+w.Config.Host) require.Contains(t, outStr, "✓ profile: default") } func TestAuthDescribeFailure(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Skipf("Skipping because of https://github.com/databricks/cli/issues/2010") - stdout, _ := RequireSuccessfulRun(t, "auth", "describe", "--profile", "nonexistent") + ctx := context.Background() + stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "auth", "describe", "--profile", "nonexistent") outStr := stdout.String() require.NotEmpty(t, outStr) @@ -44,6 +46,6 @@ func TestAuthDescribeFailure(t *testing.T) { w, err := databricks.NewWorkspaceClient(&databricks.Config{}) require.NoError(t, err) - require.Contains(t, outStr, fmt.Sprintf("✓ host: %s", w.Config.Host)) + require.Contains(t, outStr, "✓ host: "+w.Config.Host) require.Contains(t, outStr, "✓ profile: nonexistent (from --profile flag)") } diff --git a/integration/cmd/clusters/clusters_test.go b/integration/cmd/clusters/clusters_test.go new file mode 100644 index 000000000..4e20a0558 --- /dev/null +++ b/integration/cmd/clusters/clusters_test.go @@ -0,0 +1,63 @@ +package clusters_test + +import ( + "context" + "fmt" + "regexp" + "testing" + + 
"github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestClustersList(t *testing.T) { + ctx := context.Background() + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "clusters", "list") + outStr := stdout.String() + assert.Contains(t, outStr, "ID") + assert.Contains(t, outStr, "Name") + assert.Contains(t, outStr, "State") + assert.Equal(t, "", stderr.String()) + + idRegExp := regexp.MustCompile(`[0-9]{4}\-[0-9]{6}-[a-z0-9]{8}`) + clusterId := idRegExp.FindString(outStr) + assert.NotEmpty(t, clusterId) +} + +func TestClustersGet(t *testing.T) { + ctx := context.Background() + clusterId := findValidClusterID(t) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "clusters", "get", clusterId) + outStr := stdout.String() + assert.Contains(t, outStr, fmt.Sprintf(`"cluster_id":"%s"`, clusterId)) + assert.Equal(t, "", stderr.String()) +} + +func TestClusterCreateErrorWhenNoArguments(t *testing.T) { + ctx := context.Background() + _, _, err := testcli.RequireErrorRun(t, ctx, "clusters", "create") + assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") +} + +// findValidClusterID lists clusters in the workspace to find a valid cluster ID. +func findValidClusterID(t *testing.T) string { + ctx, wt := acc.WorkspaceTest(t) + it := wt.W.Clusters.List(ctx, compute.ListClustersRequest{ + FilterBy: &compute.ListClustersFilterBy{ + ClusterSources: []compute.ClusterSource{ + compute.ClusterSourceApi, + compute.ClusterSourceUi, + }, + }, + }) + + clusterIDs, err := listing.ToSliceN(ctx, it, 1) + require.NoError(t, err) + require.Len(t, clusterIDs, 1) + + return clusterIDs[0].ClusterId +} diff --git a/internal/fs_cat_test.go b/integration/cmd/fs/cat_test.go similarity index 57% rename from internal/fs_cat_test.go rename to integration/cmd/fs/cat_test.go index 6292aef18..3e964fe6e 100644 --- a/internal/fs_cat_test.go +++ b/integration/cmd/fs/cat_test.go @@ -1,4 +1,4 @@ -package internal +package fs_test import ( "context" @@ -7,13 +7,14 @@ import ( "strings" "testing" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestAccFsCat(t *testing.T) { +func TestFsCat(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -22,18 +23,20 @@ func TestAccFsCat(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := tc.setupFiler(t) + err := f.Write(context.Background(), "hello.txt", strings.NewReader("abcd"), filer.CreateParentDirectories) require.NoError(t, err) - stdout, stderr := RequireSuccessfulRun(t, "fs", "cat", path.Join(tmpDir, "hello.txt")) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "cat", path.Join(tmpDir, "hello.txt")) assert.Equal(t, "", stderr.String()) assert.Equal(t, "abcd", stdout.String()) }) } } -func TestAccFsCatOnADir(t *testing.T) { +func TestFsCatOnADir(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -42,17 +45,19 @@ func TestAccFsCatOnADir(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := tc.setupFiler(t) + err := f.Mkdir(context.Background(), "dir1") 
require.NoError(t, err) - _, _, err = RequireErrorRun(t, "fs", "cat", path.Join(tmpDir, "dir1")) + _, _, err = testcli.RequireErrorRun(t, ctx, "fs", "cat", path.Join(tmpDir, "dir1")) assert.ErrorAs(t, err, &filer.NotAFile{}) }) } } -func TestAccFsCatOnNonExistentFile(t *testing.T) { +func TestFsCatOnNonExistentFile(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -61,36 +66,32 @@ func TestAccFsCatOnNonExistentFile(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() _, tmpDir := tc.setupFiler(t) - _, _, err := RequireErrorRun(t, "fs", "cat", path.Join(tmpDir, "non-existent-file")) + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "cat", path.Join(tmpDir, "non-existent-file")) assert.ErrorIs(t, err, fs.ErrNotExist) }) } } -func TestAccFsCatForDbfsInvalidScheme(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - _, _, err := RequireErrorRun(t, "fs", "cat", "dab:/non-existent-file") +func TestFsCatForDbfsInvalidScheme(t *testing.T) { + ctx := context.Background() + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "cat", "dab:/non-existent-file") assert.ErrorContains(t, err, "invalid scheme: dab") } -func TestAccFsCatDoesNotSupportOutputModeJson(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - tmpDir := TemporaryDbfsDir(t, w) +func TestFsCatDoesNotSupportOutputModeJson(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W + tmpDir := acc.TemporaryDbfsDir(wt, "fs-cat-") f, err := filer.NewDbfsClient(w, tmpDir) require.NoError(t, err) err = f.Write(ctx, "hello.txt", strings.NewReader("abc")) require.NoError(t, err) - _, _, err = RequireErrorRun(t, "fs", "cat", "dbfs:"+path.Join(tmpDir, "hello.txt"), "--output=json") + _, _, err = testcli.RequireErrorRun(t, ctx, "fs", "cat", "dbfs:"+path.Join(tmpDir, "hello.txt"), "--output=json") assert.ErrorContains(t, err, "json output not supported") } diff --git a/internal/completer_test.go b/integration/cmd/fs/completion_test.go similarity index 66% rename from internal/completer_test.go rename to integration/cmd/fs/completion_test.go index b2c936886..b13bf9d60 100644 --- a/internal/completer_test.go +++ b/integration/cmd/fs/completion_test.go @@ -1,12 +1,12 @@ -package internal +package fs_test import ( "context" - "fmt" "strings" "testing" _ "github.com/databricks/cli/cmd/fs" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -17,11 +17,12 @@ func setupCompletionFile(t *testing.T, f filer.Filer) { require.NoError(t, err) } -func TestAccFsCompletion(t *testing.T) { +func TestFsCompletion(t *testing.T) { + ctx := context.Background() f, tmpDir := setupDbfsFiler(t) setupCompletionFile(t, f) - stdout, _ := RequireSuccessfulRun(t, "__complete", "fs", "ls", tmpDir+"/") - expectedOutput := fmt.Sprintf("%s/dir1/\n:2\n", tmpDir) + stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "__complete", "fs", "ls", tmpDir+"/") + expectedOutput := tmpDir + "/dir1/\n:2\n" assert.Equal(t, expectedOutput, stdout.String()) } diff --git a/internal/fs_cp_test.go b/integration/cmd/fs/cp_test.go similarity index 76% rename from internal/fs_cp_test.go rename to integration/cmd/fs/cp_test.go index b69735bc0..76aef7acf 100644 --- a/internal/fs_cp_test.go +++ b/integration/cmd/fs/cp_test.go @@ -1,4 +1,4 @@ -package internal +package fs_test import ( "context" @@ -10,6 +10,8 @@ 
import ( "strings" "testing" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -61,8 +63,8 @@ func assertTargetDir(t *testing.T, ctx context.Context, f filer.Filer) { type cpTest struct { name string - setupSource func(*testing.T) (filer.Filer, string) - setupTarget func(*testing.T) (filer.Filer, string) + setupSource func(testutil.TestingT) (filer.Filer, string) + setupTarget func(testutil.TestingT) (filer.Filer, string) } func copyTests() []cpTest { @@ -120,7 +122,7 @@ func copyTests() []cpTest { } } -func TestAccFsCpDir(t *testing.T) { +func TestFsCpDir(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -129,18 +131,19 @@ func TestAccFsCpDir(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), sourceFiler) - RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive") + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", sourceDir, targetDir, "--recursive") assertTargetDir(t, context.Background(), targetFiler) }) } } -func TestAccFsCpFileToFile(t *testing.T) { +func TestFsCpFileToFile(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -149,18 +152,19 @@ func TestAccFsCpFileToFile(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceFile(t, context.Background(), sourceFiler) - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "foo.txt"), path.Join(targetDir, "bar.txt")) + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", path.Join(sourceDir, "foo.txt"), path.Join(targetDir, "bar.txt")) assertTargetFile(t, context.Background(), targetFiler, "bar.txt") }) } } -func TestAccFsCpFileToDir(t *testing.T) { +func TestFsCpFileToDir(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -169,18 +173,19 @@ func TestAccFsCpFileToDir(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceFile(t, context.Background(), sourceFiler) - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "foo.txt"), targetDir) + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", path.Join(sourceDir, "foo.txt"), targetDir) assertTargetFile(t, context.Background(), targetFiler, "foo.txt") }) } } -func TestAccFsCpFileToDirForWindowsPaths(t *testing.T) { +func TestFsCpFileToDirForWindowsPaths(t *testing.T) { if runtime.GOOS != "windows" { t.Skip("Skipping test on non-windows OS") } @@ -192,11 +197,11 @@ func TestAccFsCpFileToDirForWindowsPaths(t *testing.T) { windowsPath := filepath.Join(filepath.FromSlash(sourceDir), "foo.txt") - RequireSuccessfulRun(t, "fs", "cp", windowsPath, targetDir) + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", windowsPath, targetDir) assertTargetFile(t, ctx, targetFiler, "foo.txt") } -func TestAccFsCpDirToDirFileNotOverwritten(t *testing.T) { +func TestFsCpDirToDirFileNotOverwritten(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -205,6 +210,7 @@ func TestAccFsCpDirToDirFileNotOverwritten(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, 
sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), sourceFiler) @@ -213,7 +219,7 @@ func TestAccFsCpDirToDirFileNotOverwritten(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) require.NoError(t, err) - RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive") + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", sourceDir, targetDir, "--recursive") assertFileContent(t, context.Background(), targetFiler, "a/b/c/hello.txt", "this should not be overwritten") assertFileContent(t, context.Background(), targetFiler, "query.sql", "SELECT 1") assertFileContent(t, context.Background(), targetFiler, "pyNb.py", "# Databricks notebook source\nprint(123)") @@ -221,7 +227,7 @@ func TestAccFsCpDirToDirFileNotOverwritten(t *testing.T) { } } -func TestAccFsCpFileToDirFileNotOverwritten(t *testing.T) { +func TestFsCpFileToDirFileNotOverwritten(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -230,6 +236,7 @@ func TestAccFsCpFileToDirFileNotOverwritten(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), sourceFiler) @@ -238,13 +245,13 @@ func TestAccFsCpFileToDirFileNotOverwritten(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) require.NoError(t, err) - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c")) + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c")) assertFileContent(t, context.Background(), targetFiler, "a/b/c/hello.txt", "this should not be overwritten") }) } } -func TestAccFsCpFileToFileFileNotOverwritten(t *testing.T) { +func TestFsCpFileToFileFileNotOverwritten(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -253,6 +260,7 @@ func TestAccFsCpFileToFileFileNotOverwritten(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), sourceFiler) @@ -261,13 +269,13 @@ func TestAccFsCpFileToFileFileNotOverwritten(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/dontoverwrite.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) require.NoError(t, err) - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/dontoverwrite.txt")) + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/dontoverwrite.txt")) assertFileContent(t, context.Background(), targetFiler, "a/b/c/dontoverwrite.txt", "this should not be overwritten") }) } } -func TestAccFsCpDirToDirWithOverwriteFlag(t *testing.T) { +func TestFsCpDirToDirWithOverwriteFlag(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -276,6 +284,7 @@ func TestAccFsCpDirToDirWithOverwriteFlag(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) 
setupSourceDir(t, context.Background(), sourceFiler) @@ -284,13 +293,13 @@ func TestAccFsCpDirToDirWithOverwriteFlag(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should be overwritten"), filer.CreateParentDirectories) require.NoError(t, err) - RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive", "--overwrite") + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", sourceDir, targetDir, "--recursive", "--overwrite") assertTargetDir(t, context.Background(), targetFiler) }) } } -func TestAccFsCpFileToFileWithOverwriteFlag(t *testing.T) { +func TestFsCpFileToFileWithOverwriteFlag(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -299,6 +308,7 @@ func TestAccFsCpFileToFileWithOverwriteFlag(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), sourceFiler) @@ -307,13 +317,13 @@ func TestAccFsCpFileToFileWithOverwriteFlag(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/overwritten.txt", strings.NewReader("this should be overwritten"), filer.CreateParentDirectories) require.NoError(t, err) - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/overwritten.txt"), "--overwrite") + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/overwritten.txt"), "--overwrite") assertFileContent(t, context.Background(), targetFiler, "a/b/c/overwritten.txt", "hello, world\n") }) } } -func TestAccFsCpFileToDirWithOverwriteFlag(t *testing.T) { +func TestFsCpFileToDirWithOverwriteFlag(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -322,6 +332,7 @@ func TestAccFsCpFileToDirWithOverwriteFlag(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), sourceFiler) @@ -330,13 +341,13 @@ func TestAccFsCpFileToDirWithOverwriteFlag(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should be overwritten"), filer.CreateParentDirectories) require.NoError(t, err) - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c"), "--overwrite") + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c"), "--overwrite") assertFileContent(t, context.Background(), targetFiler, "a/b/c/hello.txt", "hello, world\n") }) } } -func TestAccFsCpErrorsWhenSourceIsDirWithoutRecursiveFlag(t *testing.T) { +func TestFsCpErrorsWhenSourceIsDirWithoutRecursiveFlag(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -345,23 +356,23 @@ func TestAccFsCpErrorsWhenSourceIsDirWithoutRecursiveFlag(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() _, tmpDir := tc.setupFiler(t) - _, _, err := RequireErrorRun(t, "fs", "cp", path.Join(tmpDir), path.Join(tmpDir, "foobar")) + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "cp", path.Join(tmpDir), path.Join(tmpDir, "foobar")) r := regexp.MustCompile("source path .* is a directory. 
Please specify the --recursive flag") assert.Regexp(t, r, err.Error()) }) } } -func TestAccFsCpErrorsOnInvalidScheme(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - _, _, err := RequireErrorRun(t, "fs", "cp", "dbfs:/a", "https:/b") +func TestFsCpErrorsOnInvalidScheme(t *testing.T) { + ctx := context.Background() + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "cp", "dbfs:/a", "https:/b") assert.Equal(t, "invalid scheme: https", err.Error()) } -func TestAccFsCpSourceIsDirectoryButTargetIsFile(t *testing.T) { +func TestFsCpSourceIsDirectoryButTargetIsFile(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -370,6 +381,7 @@ func TestAccFsCpSourceIsDirectoryButTargetIsFile(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), sourceFiler) @@ -378,7 +390,7 @@ func TestAccFsCpSourceIsDirectoryButTargetIsFile(t *testing.T) { err := targetFiler.Write(context.Background(), "my_target", strings.NewReader("I'll block any attempts to recursively copy"), filer.CreateParentDirectories) require.NoError(t, err) - _, _, err = RequireErrorRun(t, "fs", "cp", sourceDir, path.Join(targetDir, "my_target"), "--recursive") + _, _, err = testcli.RequireErrorRun(t, ctx, "fs", "cp", sourceDir, path.Join(targetDir, "my_target"), "--recursive") assert.Error(t, err) }) } diff --git a/integration/cmd/fs/helpers_test.go b/integration/cmd/fs/helpers_test.go new file mode 100644 index 000000000..e1bebb28f --- /dev/null +++ b/integration/cmd/fs/helpers_test.go @@ -0,0 +1,44 @@ +package fs_test + +import ( + "os" + "path" + "path/filepath" + + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" + + "github.com/databricks/cli/libs/filer" + "github.com/stretchr/testify/require" +) + +func setupLocalFiler(t testutil.TestingT) (filer.Filer, string) { + tmp := t.TempDir() + f, err := filer.NewLocalClient(tmp) + require.NoError(t, err) + + return f, path.Join(filepath.ToSlash(tmp)) +} + +func setupDbfsFiler(t testutil.TestingT) (filer.Filer, string) { + _, wt := acc.WorkspaceTest(t) + + tmpdir := acc.TemporaryDbfsDir(wt) + f, err := filer.NewDbfsClient(wt.W, tmpdir) + require.NoError(t, err) + return f, path.Join("dbfs:/", tmpdir) +} + +func setupUcVolumesFiler(t testutil.TestingT) (filer.Filer, string) { + _, wt := acc.WorkspaceTest(t) + + if os.Getenv("TEST_METASTORE_ID") == "" { + t.Skip("Skipping tests that require a UC Volume when metastore id is not set.") + } + + tmpdir := acc.TemporaryVolume(wt) + f, err := filer.NewFilesClient(wt.W, tmpdir) + require.NoError(t, err) + + return f, path.Join("dbfs:/", tmpdir) +} diff --git a/internal/fs_ls_test.go b/integration/cmd/fs/ls_test.go similarity index 63% rename from internal/fs_ls_test.go rename to integration/cmd/fs/ls_test.go index 994a4a425..25929fdf3 100644 --- a/internal/fs_ls_test.go +++ b/integration/cmd/fs/ls_test.go @@ -1,15 +1,16 @@ -package internal +package fs_test import ( "context" "encoding/json" "io/fs" "path" - "regexp" "strings" "testing" _ "github.com/databricks/cli/cmd/fs" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -17,7 +18,7 @@ import ( type fsTest struct { name string - setupFiler func(t *testing.T) (filer.Filer, string) + setupFiler 
func(t testutil.TestingT) (filer.Filer, string) } var fsTests = []fsTest{ @@ -38,7 +39,7 @@ func setupLsFiles(t *testing.T, f filer.Filer) { require.NoError(t, err) } -func TestAccFsLs(t *testing.T) { +func TestFsLs(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -47,10 +48,11 @@ func TestAccFsLs(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := tc.setupFiler(t) setupLsFiles(t, f) - stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", tmpDir, "--output=json") + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "ls", tmpDir, "--output=json") assert.Equal(t, "", stderr.String()) var parsedStdout []map[string]any @@ -62,16 +64,16 @@ func TestAccFsLs(t *testing.T) { assert.Equal(t, "a", parsedStdout[0]["name"]) assert.Equal(t, true, parsedStdout[0]["is_directory"]) - assert.Equal(t, float64(0), parsedStdout[0]["size"]) + assert.InDelta(t, float64(0), parsedStdout[0]["size"], 0.0001) assert.Equal(t, "bye.txt", parsedStdout[1]["name"]) assert.Equal(t, false, parsedStdout[1]["is_directory"]) - assert.Equal(t, float64(3), parsedStdout[1]["size"]) + assert.InDelta(t, float64(3), parsedStdout[1]["size"], 0.0001) }) } } -func TestAccFsLsWithAbsolutePaths(t *testing.T) { +func TestFsLsWithAbsolutePaths(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -80,10 +82,11 @@ func TestAccFsLsWithAbsolutePaths(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := tc.setupFiler(t) setupLsFiles(t, f) - stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", tmpDir, "--output=json", "--absolute") + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "ls", tmpDir, "--output=json", "--absolute") assert.Equal(t, "", stderr.String()) var parsedStdout []map[string]any @@ -95,16 +98,16 @@ func TestAccFsLsWithAbsolutePaths(t *testing.T) { assert.Equal(t, path.Join(tmpDir, "a"), parsedStdout[0]["name"]) assert.Equal(t, true, parsedStdout[0]["is_directory"]) - assert.Equal(t, float64(0), parsedStdout[0]["size"]) + assert.InDelta(t, float64(0), parsedStdout[0]["size"], 0.0001) assert.Equal(t, path.Join(tmpDir, "bye.txt"), parsedStdout[1]["name"]) assert.Equal(t, false, parsedStdout[1]["is_directory"]) - assert.Equal(t, float64(3), parsedStdout[1]["size"]) + assert.InDelta(t, float64(3), parsedStdout[1]["size"].(float64), 0.0001) }) } } -func TestAccFsLsOnFile(t *testing.T) { +func TestFsLsOnFile(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -112,17 +115,19 @@ func TestAccFsLsOnFile(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + + ctx := context.Background() f, tmpDir := tc.setupFiler(t) setupLsFiles(t, f) - _, _, err := RequireErrorRun(t, "fs", "ls", path.Join(tmpDir, "a", "hello.txt"), "--output=json") - assert.Regexp(t, regexp.MustCompile("not a directory: .*/a/hello.txt"), err.Error()) + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "ls", path.Join(tmpDir, "a", "hello.txt"), "--output=json") + assert.Regexp(t, "not a directory: .*/a/hello.txt", err.Error()) assert.ErrorAs(t, err, &filer.NotADirectory{}) }) } } -func TestAccFsLsOnEmptyDir(t *testing.T) { +func TestFsLsOnEmptyDir(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -131,21 +136,22 @@ func TestAccFsLsOnEmptyDir(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() _, tmpDir := tc.setupFiler(t) - stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", tmpDir, "--output=json") + stdout, stderr := 
testcli.RequireSuccessfulRun(t, ctx, "fs", "ls", tmpDir, "--output=json") assert.Equal(t, "", stderr.String()) var parsedStdout []map[string]any err := json.Unmarshal(stdout.Bytes(), &parsedStdout) require.NoError(t, err) // assert on ls output - assert.Equal(t, 0, len(parsedStdout)) + assert.Empty(t, parsedStdout) }) } } -func TestAccFsLsForNonexistingDir(t *testing.T) { +func TestFsLsForNonexistingDir(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -154,20 +160,20 @@ func TestAccFsLsForNonexistingDir(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() _, tmpDir := tc.setupFiler(t) - _, _, err := RequireErrorRun(t, "fs", "ls", path.Join(tmpDir, "nonexistent"), "--output=json") + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "ls", path.Join(tmpDir, "nonexistent"), "--output=json") assert.ErrorIs(t, err, fs.ErrNotExist) - assert.Regexp(t, regexp.MustCompile("no such directory: .*/nonexistent"), err.Error()) + assert.Regexp(t, "no such directory: .*/nonexistent", err.Error()) }) } } -func TestAccFsLsWithoutScheme(t *testing.T) { +func TestFsLsWithoutScheme(t *testing.T) { t.Parallel() - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - _, _, err := RequireErrorRun(t, "fs", "ls", "/path-without-a-dbfs-scheme", "--output=json") + ctx := context.Background() + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "ls", "/path-without-a-dbfs-scheme", "--output=json") assert.ErrorIs(t, err, fs.ErrNotExist) } diff --git a/internal/fs_mkdir_test.go b/integration/cmd/fs/mkdir_test.go similarity index 72% rename from internal/fs_mkdir_test.go rename to integration/cmd/fs/mkdir_test.go index 9191f6143..eff0599a7 100644 --- a/internal/fs_mkdir_test.go +++ b/integration/cmd/fs/mkdir_test.go @@ -1,4 +1,4 @@ -package internal +package fs_test import ( "context" @@ -7,12 +7,13 @@ import ( "strings" "testing" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestAccFsMkdir(t *testing.T) { +func TestFsMkdir(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -21,10 +22,11 @@ func TestAccFsMkdir(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := tc.setupFiler(t) // create directory "a" - stdout, stderr := RequireSuccessfulRun(t, "fs", "mkdir", path.Join(tmpDir, "a")) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "mkdir", path.Join(tmpDir, "a")) assert.Equal(t, "", stderr.String()) assert.Equal(t, "", stdout.String()) @@ -32,12 +34,12 @@ func TestAccFsMkdir(t *testing.T) { info, err := f.Stat(context.Background(), "a") require.NoError(t, err) assert.Equal(t, "a", info.Name()) - assert.Equal(t, true, info.IsDir()) + assert.True(t, info.IsDir()) }) } } -func TestAccFsMkdirCreatesIntermediateDirectories(t *testing.T) { +func TestFsMkdirCreatesIntermediateDirectories(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -46,10 +48,11 @@ func TestAccFsMkdirCreatesIntermediateDirectories(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := tc.setupFiler(t) // create directory "a/b/c" - stdout, stderr := RequireSuccessfulRun(t, "fs", "mkdir", path.Join(tmpDir, "a", "b", "c")) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "mkdir", path.Join(tmpDir, "a", "b", "c")) assert.Equal(t, "", stderr.String()) assert.Equal(t, "", stdout.String()) @@ -57,24 +60,24 @@ func 
TestAccFsMkdirCreatesIntermediateDirectories(t *testing.T) { infoA, err := f.Stat(context.Background(), "a") require.NoError(t, err) assert.Equal(t, "a", infoA.Name()) - assert.Equal(t, true, infoA.IsDir()) + assert.True(t, infoA.IsDir()) // assert directory "b" is created infoB, err := f.Stat(context.Background(), "a/b") require.NoError(t, err) assert.Equal(t, "b", infoB.Name()) - assert.Equal(t, true, infoB.IsDir()) + assert.True(t, infoB.IsDir()) // assert directory "c" is created infoC, err := f.Stat(context.Background(), "a/b/c") require.NoError(t, err) assert.Equal(t, "c", infoC.Name()) - assert.Equal(t, true, infoC.IsDir()) + assert.True(t, infoC.IsDir()) }) } } -func TestAccFsMkdirWhenDirectoryAlreadyExists(t *testing.T) { +func TestFsMkdirWhenDirectoryAlreadyExists(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -83,6 +86,7 @@ func TestAccFsMkdirWhenDirectoryAlreadyExists(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := tc.setupFiler(t) // create directory "a" @@ -90,19 +94,20 @@ func TestAccFsMkdirWhenDirectoryAlreadyExists(t *testing.T) { require.NoError(t, err) // assert run is successful without any errors - stdout, stderr := RequireSuccessfulRun(t, "fs", "mkdir", path.Join(tmpDir, "a")) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "mkdir", path.Join(tmpDir, "a")) assert.Equal(t, "", stderr.String()) assert.Equal(t, "", stdout.String()) }) } } -func TestAccFsMkdirWhenFileExistsAtPath(t *testing.T) { +func TestFsMkdirWhenFileExistsAtPath(t *testing.T) { t.Parallel() t.Run("dbfs", func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := setupDbfsFiler(t) // create file "hello" @@ -110,7 +115,7 @@ func TestAccFsMkdirWhenFileExistsAtPath(t *testing.T) { require.NoError(t, err) // assert mkdir fails - _, _, err = RequireErrorRun(t, "fs", "mkdir", path.Join(tmpDir, "hello")) + _, _, err = testcli.RequireErrorRun(t, ctx, "fs", "mkdir", path.Join(tmpDir, "hello")) // Different cloud providers or cloud configurations return different errors. 
 		regex := regexp.MustCompile(`(^|: )Path is a file: .*$|(^|: )Cannot create directory .* because .* is an existing file\.$|(^|: )mkdirs\(hadoopPath: .*, permission: rwxrwxrwx\): failed$|(^|: )"The specified path already exists.".*$`)
@@ -120,6 +125,7 @@ func TestAccFsMkdirWhenFileExistsAtPath(t *testing.T) {
 	t.Run("uc-volumes", func(t *testing.T) {
 		t.Parallel()
 
+		ctx := context.Background()
 		f, tmpDir := setupUcVolumesFiler(t)
 
 		// create file "hello"
@@ -127,7 +133,7 @@ func TestAccFsMkdirWhenFileExistsAtPath(t *testing.T) {
 		require.NoError(t, err)
 
 		// assert mkdir fails
-		_, _, err = RequireErrorRun(t, "fs", "mkdir", path.Join(tmpDir, "hello"))
+		_, _, err = testcli.RequireErrorRun(t, ctx, "fs", "mkdir", path.Join(tmpDir, "hello"))
 
 		assert.ErrorAs(t, err, &filer.FileAlreadyExistsError{})
 	})
diff --git a/internal/fs_rm_test.go b/integration/cmd/fs/rm_test.go
similarity index 77%
rename from internal/fs_rm_test.go
rename to integration/cmd/fs/rm_test.go
index e86f5713b..018c7920e 100644
--- a/internal/fs_rm_test.go
+++ b/integration/cmd/fs/rm_test.go
@@ -1,4 +1,4 @@
-package internal
+package fs_test
 
 import (
 	"context"
@@ -7,12 +7,13 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/databricks/cli/internal/testcli"
 	"github.com/databricks/cli/libs/filer"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
-func TestAccFsRmFile(t *testing.T) {
+func TestFsRmFile(t *testing.T) {
 	t.Parallel()
 
 	for _, testCase := range fsTests {
@@ -22,6 +23,7 @@ func TestAccFsRmFile(t *testing.T) {
 			t.Parallel()
 
 			// Create a file
+			ctx := context.Background()
 			f, tmpDir := tc.setupFiler(t)
 			err := f.Write(context.Background(), "hello.txt", strings.NewReader("abcd"), filer.CreateParentDirectories)
 			require.NoError(t, err)
@@ -31,7 +33,7 @@ func TestAccFsRmFile(t *testing.T) {
 			assert.NoError(t, err)
 
 			// Run rm command
-			stdout, stderr := RequireSuccessfulRun(t, "fs", "rm", path.Join(tmpDir, "hello.txt"))
+			stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "rm", path.Join(tmpDir, "hello.txt"))
 			assert.Equal(t, "", stderr.String())
 			assert.Equal(t, "", stdout.String())
@@ -42,7 +44,7 @@ func TestAccFsRmFile(t *testing.T) {
 	}
 }
 
-func TestAccFsRmEmptyDir(t *testing.T) {
+func TestFsRmEmptyDir(t *testing.T) {
 	t.Parallel()
 
 	for _, testCase := range fsTests {
@@ -52,6 +54,7 @@ func TestAccFsRmEmptyDir(t *testing.T) {
 			t.Parallel()
 
 			// Create a directory
+			ctx := context.Background()
 			f, tmpDir := tc.setupFiler(t)
 			err := f.Mkdir(context.Background(), "a")
 			require.NoError(t, err)
@@ -61,7 +64,7 @@ func TestAccFsRmEmptyDir(t *testing.T) {
 			assert.NoError(t, err)
 
 			// Run rm command
-			stdout, stderr := RequireSuccessfulRun(t, "fs", "rm", path.Join(tmpDir, "a"))
+			stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "rm", path.Join(tmpDir, "a"))
 			assert.Equal(t, "", stderr.String())
 			assert.Equal(t, "", stdout.String())
@@ -72,7 +75,7 @@ func TestAccFsRmEmptyDir(t *testing.T) {
 	}
 }
 
-func TestAccFsRmNonEmptyDirectory(t *testing.T) {
+func TestFsRmNonEmptyDirectory(t *testing.T) {
 	t.Parallel()
 
 	for _, testCase := range fsTests {
@@ -82,6 +85,7 @@ func TestAccFsRmNonEmptyDirectory(t *testing.T) {
 			t.Parallel()
 
 			// Create a directory
+			ctx := context.Background()
 			f, tmpDir := tc.setupFiler(t)
 			err := f.Mkdir(context.Background(), "a")
 			require.NoError(t, err)
@@ -95,14 +99,14 @@ func TestAccFsRmNonEmptyDirectory(t *testing.T) {
 			assert.NoError(t, err)
 
 			// Run rm command
-			_, _, err = RequireErrorRun(t, "fs", "rm", path.Join(tmpDir, "a"))
+			_, _, err = testcli.RequireErrorRun(t, ctx, "fs", "rm", path.Join(tmpDir, "a"))
assert.ErrorIs(t, err, fs.ErrInvalid) assert.ErrorAs(t, err, &filer.DirectoryNotEmptyError{}) }) } } -func TestAccFsRmForNonExistentFile(t *testing.T) { +func TestFsRmForNonExistentFile(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -111,17 +115,17 @@ func TestAccFsRmForNonExistentFile(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() _, tmpDir := tc.setupFiler(t) // Expect error if file does not exist - _, _, err := RequireErrorRun(t, "fs", "rm", path.Join(tmpDir, "does-not-exist")) + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "rm", path.Join(tmpDir, "does-not-exist")) assert.ErrorIs(t, err, fs.ErrNotExist) }) } - } -func TestAccFsRmDirRecursively(t *testing.T) { +func TestFsRmDirRecursively(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -130,6 +134,7 @@ func TestAccFsRmDirRecursively(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := tc.setupFiler(t) // Create a directory @@ -145,7 +150,7 @@ func TestAccFsRmDirRecursively(t *testing.T) { assert.NoError(t, err) // Run rm command - stdout, stderr := RequireSuccessfulRun(t, "fs", "rm", path.Join(tmpDir, "a"), "--recursive") + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "rm", path.Join(tmpDir, "a"), "--recursive") assert.Equal(t, "", stderr.String()) assert.Equal(t, "", stdout.String()) diff --git a/integration/cmd/jobs/jobs_test.go b/integration/cmd/jobs/jobs_test.go new file mode 100644 index 000000000..7ebc135a3 --- /dev/null +++ b/integration/cmd/jobs/jobs_test.go @@ -0,0 +1,24 @@ +package jobs_test + +import ( + "context" + "encoding/json" + "strconv" + "testing" + + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCreateJob(t *testing.T) { + testutil.Require(t, testutil.Azure) + ctx := context.Background() + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "jobs", "create", "--json", "@testdata/create_job_without_workers.json", "--log-level=debug") + assert.Empty(t, stderr.String()) + var output map[string]int + err := json.Unmarshal(stdout.Bytes(), &output) + require.NoError(t, err) + testcli.RequireSuccessfulRun(t, ctx, "jobs", "delete", strconv.Itoa(output["job_id"]), "--log-level=debug") +} diff --git a/internal/testjsons/create_job_without_workers.json b/integration/cmd/jobs/testdata/create_job_without_workers.json similarity index 100% rename from internal/testjsons/create_job_without_workers.json rename to integration/cmd/jobs/testdata/create_job_without_workers.json diff --git a/internal/repos_test.go b/integration/cmd/repos/repos_test.go similarity index 55% rename from internal/repos_test.go rename to integration/cmd/repos/repos_test.go index 1ad0e8775..7526a14ca 100644 --- a/internal/repos_test.go +++ b/integration/cmd/repos/repos_test.go @@ -1,4 +1,4 @@ -package internal +package repos_test import ( "context" @@ -6,6 +6,9 @@ import ( "strconv" "testing" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/service/workspace" @@ -13,10 +16,12 @@ import ( "github.com/stretchr/testify/require" ) +const repoUrl = "https://github.com/databricks/databricks-empty-ide-project.git" + func 
synthesizeTemporaryRepoPath(t *testing.T, w *databricks.WorkspaceClient, ctx context.Context) string { me, err := w.CurrentUser.Me(ctx) require.NoError(t, err) - repoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, RandomName("empty-repo-integration-")) + repoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, testutil.RandomName("empty-repo-integration-")) // Cleanup if repo was created at specified path. t.Cleanup(func() { @@ -43,15 +48,12 @@ func createTemporaryRepo(t *testing.T, w *databricks.WorkspaceClient, ctx contex return repoInfo.Id, repoPath } -func TestAccReposCreateWithProvider(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) +func TestReposCreateWithProvider(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W repoPath := synthesizeTemporaryRepoPath(t, w, ctx) - _, stderr := RequireSuccessfulRun(t, "repos", "create", repoUrl, "gitHub", "--path", repoPath) + _, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "create", repoUrl, "gitHub", "--path", repoPath) assert.Equal(t, "", stderr.String()) // Confirm the repo was created. @@ -60,15 +62,12 @@ func TestAccReposCreateWithProvider(t *testing.T) { assert.Equal(t, workspace.ObjectTypeRepo, oi.ObjectType) } -func TestAccReposCreateWithoutProvider(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) +func TestReposCreateWithoutProvider(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W repoPath := synthesizeTemporaryRepoPath(t, w, ctx) - _, stderr := RequireSuccessfulRun(t, "repos", "create", repoUrl, "--path", repoPath) + _, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "create", repoUrl, "--path", repoPath) assert.Equal(t, "", stderr.String()) // Confirm the repo was created. 
@@ -77,90 +76,78 @@ func TestAccReposCreateWithoutProvider(t *testing.T) {
 	assert.Equal(t, workspace.ObjectTypeRepo, oi.ObjectType)
 }
 
-func TestAccReposGet(t *testing.T) {
-	t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
-
-	ctx := context.Background()
-	w, err := databricks.NewWorkspaceClient()
-	require.NoError(t, err)
+func TestReposGet(t *testing.T) {
+	ctx, wt := acc.WorkspaceTest(t)
+	w := wt.W
 
 	repoId, repoPath := createTemporaryRepo(t, w, ctx)
 
 	// Get by ID
-	byIdOutput, stderr := RequireSuccessfulRun(t, "repos", "get", strconv.FormatInt(repoId, 10), "--output=json")
+	byIdOutput, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "get", strconv.FormatInt(repoId, 10), "--output=json")
 	assert.Equal(t, "", stderr.String())
 
 	// Get by path
-	byPathOutput, stderr := RequireSuccessfulRun(t, "repos", "get", repoPath, "--output=json")
+	byPathOutput, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "get", repoPath, "--output=json")
 	assert.Equal(t, "", stderr.String())
 
 	// Output should be the same
 	assert.Equal(t, byIdOutput.String(), byPathOutput.String())
 
 	// Get by path fails
-	_, stderr, err = RequireErrorRun(t, "repos", "get", repoPath+"-doesntexist", "--output=json")
+	_, stderr, err := testcli.RequireErrorRun(t, ctx, "repos", "get", repoPath+"-doesntexist", "--output=json")
 	assert.ErrorContains(t, err, "failed to look up repo")
 
 	// Get by path resolves to something other than a repo
-	_, stderr, err = RequireErrorRun(t, "repos", "get", "/Repos", "--output=json")
+	_, stderr, err = testcli.RequireErrorRun(t, ctx, "repos", "get", "/Repos", "--output=json")
 	assert.ErrorContains(t, err, "is not a repo")
 }
 
-func TestAccReposUpdate(t *testing.T) {
-	t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
-
-	ctx := context.Background()
-	w, err := databricks.NewWorkspaceClient()
-	require.NoError(t, err)
+func TestReposUpdate(t *testing.T) {
+	ctx, wt := acc.WorkspaceTest(t)
+	w := wt.W
 
 	repoId, repoPath := createTemporaryRepo(t, w, ctx)
 
 	// Update by ID
-	byIdOutput, stderr := RequireSuccessfulRun(t, "repos", "update", strconv.FormatInt(repoId, 10), "--branch", "ide")
+	byIdOutput, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "update", strconv.FormatInt(repoId, 10), "--branch", "ide")
 	assert.Equal(t, "", stderr.String())
 
 	// Update by path
-	byPathOutput, stderr := RequireSuccessfulRun(t, "repos", "update", repoPath, "--branch", "ide")
+	byPathOutput, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "update", repoPath, "--branch", "ide")
 	assert.Equal(t, "", stderr.String())
 
 	// Output should be the same
 	assert.Equal(t, byIdOutput.String(), byPathOutput.String())
 }
 
-func TestAccReposDeleteByID(t *testing.T) {
-	t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
-
-	ctx := context.Background()
-	w, err := databricks.NewWorkspaceClient()
-	require.NoError(t, err)
+func TestReposDeleteByID(t *testing.T) {
+	ctx, wt := acc.WorkspaceTest(t)
+	w := wt.W
 
 	repoId, _ := createTemporaryRepo(t, w, ctx)
 
 	// Delete by ID
-	stdout, stderr := RequireSuccessfulRun(t, "repos", "delete", strconv.FormatInt(repoId, 10))
+	stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "delete", strconv.FormatInt(repoId, 10))
 	assert.Equal(t, "", stdout.String())
 	assert.Equal(t, "", stderr.String())
 
 	// Check it was actually deleted
-	_, err = w.Repos.GetByRepoId(ctx, repoId)
+	_, err := w.Repos.GetByRepoId(ctx, repoId)
 	assert.True(t, apierr.IsMissing(err), err)
 }
 
-func TestAccReposDeleteByPath(t *testing.T) {
-	t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
-
-	ctx := context.Background()
-	w, err := databricks.NewWorkspaceClient()
- require.NoError(t, err) +func TestReposDeleteByPath(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W repoId, repoPath := createTemporaryRepo(t, w, ctx) // Delete by path - stdout, stderr := RequireSuccessfulRun(t, "repos", "delete", repoPath) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "delete", repoPath) assert.Equal(t, "", stdout.String()) assert.Equal(t, "", stderr.String()) // Check it was actually deleted - _, err = w.Repos.GetByRepoId(ctx, repoId) + _, err := w.Repos.GetByRepoId(ctx, repoId) assert.True(t, apierr.IsMissing(err), err) } diff --git a/internal/secrets_test.go b/integration/cmd/secrets/secrets_test.go similarity index 76% rename from internal/secrets_test.go rename to integration/cmd/secrets/secrets_test.go index 59e5d6150..43ad54de2 100644 --- a/internal/secrets_test.go +++ b/integration/cmd/secrets/secrets_test.go @@ -1,4 +1,4 @@ -package internal +package secrets_test import ( "context" @@ -6,19 +6,22 @@ import ( "fmt" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestSecretsCreateScopeErrWhenNoArguments(t *testing.T) { - _, _, err := RequireErrorRun(t, "secrets", "create-scope") + ctx := context.Background() + _, _, err := testcli.RequireErrorRun(t, ctx, "secrets", "create-scope") assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } func temporarySecretScope(ctx context.Context, t *acc.WorkspaceT) string { - scope := acc.RandomName("cli-acc-") + scope := testutil.RandomName("cli-acc-") err := t.W.Secrets.CreateScope(ctx, workspace.CreateScope{ Scope: scope, }) @@ -61,13 +64,13 @@ func assertSecretBytesValue(t *acc.WorkspaceT, scope, key string, expected []byt assert.Equal(t, expected, decoded) } -func TestAccSecretsPutSecretStringValue(tt *testing.T) { +func TestSecretsPutSecretStringValue(tt *testing.T) { ctx, t := acc.WorkspaceTest(tt) scope := temporarySecretScope(ctx, t) key := "test-key" value := "test-value\nwith-newlines\n" - stdout, stderr := RequireSuccessfulRun(t.T, "secrets", "put-secret", scope, key, "--string-value", value) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "secrets", "put-secret", scope, key, "--string-value", value) assert.Empty(t, stdout) assert.Empty(t, stderr) @@ -75,13 +78,13 @@ func TestAccSecretsPutSecretStringValue(tt *testing.T) { assertSecretBytesValue(t, scope, key, []byte(value)) } -func TestAccSecretsPutSecretBytesValue(tt *testing.T) { +func TestSecretsPutSecretBytesValue(tt *testing.T) { ctx, t := acc.WorkspaceTest(tt) scope := temporarySecretScope(ctx, t) key := "test-key" value := []byte{0x00, 0x01, 0x02, 0x03} - stdout, stderr := RequireSuccessfulRun(t.T, "secrets", "put-secret", scope, key, "--bytes-value", string(value)) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "secrets", "put-secret", scope, key, "--bytes-value", string(value)) assert.Empty(t, stdout) assert.Empty(t, stderr) diff --git a/integration/cmd/storage_credentials/storage_credentials_test.go b/integration/cmd/storage_credentials/storage_credentials_test.go new file mode 100644 index 000000000..e4b861312 --- /dev/null +++ b/integration/cmd/storage_credentials/storage_credentials_test.go @@ -0,0 +1,21 @@ +package storage_credentials_test + +import ( + "testing" + + 
"github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" + "github.com/stretchr/testify/assert" +) + +func TestStorageCredentialsListRendersResponse(t *testing.T) { + ctx, _ := acc.WorkspaceTest(t) + + // Check if metastore is assigned for the workspace, otherwise test will fail + t.Log(testutil.GetEnvOrSkipTest(t, "TEST_METASTORE_ID")) + + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "storage-credentials", "list") + assert.NotEmpty(t, stdout) + assert.Empty(t, stderr) +} diff --git a/internal/sync_test.go b/integration/cmd/sync/sync_test.go similarity index 84% rename from internal/sync_test.go rename to integration/cmd/sync/sync_test.go index 6f8b1827b..632497054 100644 --- a/internal/sync_test.go +++ b/integration/cmd/sync/sync_test.go @@ -1,4 +1,4 @@ -package internal +package sync_test import ( "context" @@ -15,7 +15,9 @@ import ( "testing" "time" - _ "github.com/databricks/cli/cmd/sync" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/sync" "github.com/databricks/cli/libs/testfile" @@ -36,7 +38,7 @@ var ( func setupRepo(t *testing.T, wsc *databricks.WorkspaceClient, ctx context.Context) (localRoot, remoteRoot string) { me, err := wsc.CurrentUser.Me(ctx) require.NoError(t, err) - repoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, RandomName("empty-repo-sync-integration-")) + repoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, testutil.RandomName("empty-repo-sync-integration-")) repoInfo, err := wsc.Repos.Create(ctx, workspace.CreateRepoRequest{ Path: repoPath, @@ -63,19 +65,19 @@ func setupRepo(t *testing.T, wsc *databricks.WorkspaceClient, ctx context.Contex type syncTest struct { t *testing.T - c *cobraTestRunner + c *testcli.Runner w *databricks.WorkspaceClient f filer.Filer localRoot string remoteRoot string } -func setupSyncTest(t *testing.T, args ...string) *syncTest { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func setupSyncTest(t *testing.T, args ...string) (context.Context, *syncTest) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W - w := databricks.Must(databricks.NewWorkspaceClient()) localRoot := t.TempDir() - remoteRoot := TemporaryWorkspaceDir(t, w) + remoteRoot := acc.TemporaryWorkspaceDir(wt, "sync-") f, err := filer.NewWorkspaceFilesClient(w, remoteRoot) require.NoError(t, err) @@ -88,10 +90,10 @@ func setupSyncTest(t *testing.T, args ...string) *syncTest { "json", }, args...) - c := NewCobraTestRunner(t, args...) + c := testcli.NewRunner(t, ctx, args...) c.RunBackground() - return &syncTest{ + return ctx, &syncTest{ t: t, c: c, w: w, @@ -109,7 +111,7 @@ func (s *syncTest) waitForCompletionMarker() { select { case <-ctx.Done(): s.t.Fatal("timed out waiting for sync to complete") - case line := <-s.c.stdoutLines: + case line := <-s.c.StdoutLines: var event sync.EventBase err := json.Unmarshal([]byte(line), &event) require.NoError(s.t, err) @@ -145,14 +147,11 @@ func (a *syncTest) remoteDirContent(ctx context.Context, relativeDir string, exp } } -func (a *syncTest) remoteFileContent(ctx context.Context, relativePath string, expectedContent string) { +func (a *syncTest) remoteFileContent(ctx context.Context, relativePath, expectedContent string) { filePath := path.Join(a.remoteRoot, relativePath) // Remove leading "/" so we can use it in the URL. 
- urlPath := fmt.Sprintf( - "/api/2.0/workspace-files/%s", - strings.TrimLeft(filePath, "/"), - ) + urlPath := "/api/2.0/workspace-files/" + strings.TrimLeft(filePath, "/") apiClient, err := client.New(a.w.Config) require.NoError(a.t, err) @@ -181,7 +180,7 @@ func (a *syncTest) touchFile(ctx context.Context, path string) { require.NoError(a.t, err) } -func (a *syncTest) objectType(ctx context.Context, relativePath string, expected string) { +func (a *syncTest) objectType(ctx context.Context, relativePath, expected string) { path := path.Join(a.remoteRoot, relativePath) a.c.Eventually(func() bool { @@ -193,7 +192,7 @@ func (a *syncTest) objectType(ctx context.Context, relativePath string, expected }, 30*time.Second, 5*time.Second) } -func (a *syncTest) language(ctx context.Context, relativePath string, expected string) { +func (a *syncTest) language(ctx context.Context, relativePath, expected string) { path := path.Join(a.remoteRoot, relativePath) a.c.Eventually(func() bool { @@ -223,14 +222,13 @@ func (a *syncTest) snapshotContains(files []string) { assert.Equal(a.t, s.RemotePath, a.remoteRoot) for _, filePath := range files { _, ok := s.LastModifiedTimes[filePath] - assert.True(a.t, ok, fmt.Sprintf("%s not in snapshot file: %v", filePath, s.LastModifiedTimes)) + assert.True(a.t, ok, "%s not in snapshot file: %v", filePath, s.LastModifiedTimes) } assert.Equal(a.t, len(files), len(s.LastModifiedTimes)) } -func TestAccSyncFullFileSync(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--full", "--watch") +func TestSyncFullFileSync(t *testing.T) { + ctx, assertSync := setupSyncTest(t, "--full", "--watch") // .gitignore is created by the sync process to enforce .databricks is not synced assertSync.waitForCompletionMarker() @@ -260,9 +258,8 @@ func TestAccSyncFullFileSync(t *testing.T) { assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore")) } -func TestAccSyncIncrementalFileSync(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") +func TestSyncIncrementalFileSync(t *testing.T) { + ctx, assertSync := setupSyncTest(t, "--watch") // .gitignore is created by the sync process to enforce .databricks is not synced assertSync.waitForCompletionMarker() @@ -294,9 +291,8 @@ func TestAccSyncIncrementalFileSync(t *testing.T) { assertSync.snapshotContains(append(repoFiles, ".gitignore")) } -func TestAccSyncNestedFolderSync(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") +func TestSyncNestedFolderSync(t *testing.T) { + ctx, assertSync := setupSyncTest(t, "--watch") // .gitignore is created by the sync process to enforce .databricks is not synced assertSync.waitForCompletionMarker() @@ -322,9 +318,8 @@ func TestAccSyncNestedFolderSync(t *testing.T) { assertSync.snapshotContains(append(repoFiles, ".gitignore")) } -func TestAccSyncNestedFolderDoesntFailOnNonEmptyDirectory(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") +func TestSyncNestedFolderDoesntFailOnNonEmptyDirectory(t *testing.T) { + ctx, assertSync := setupSyncTest(t, "--watch") // .gitignore is created by the sync process to enforce .databricks is not synced assertSync.waitForCompletionMarker() @@ -355,9 +350,8 @@ func TestAccSyncNestedFolderDoesntFailOnNonEmptyDirectory(t *testing.T) { assertSync.remoteExists(ctx, "dir1") } -func TestAccSyncNestedSpacePlusAndHashAreEscapedSync(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") +func 
TestSyncNestedSpacePlusAndHashAreEscapedSync(t *testing.T) { + ctx, assertSync := setupSyncTest(t, "--watch") // .gitignore is created by the sync process to enforce .databricks is not synced assertSync.waitForCompletionMarker() @@ -391,9 +385,8 @@ func TestAccSyncNestedSpacePlusAndHashAreEscapedSync(t *testing.T) { // // In the above scenario sync should delete the empty folder and add foo to the remote // file system -func TestAccSyncIncrementalFileOverwritesFolder(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") +func TestSyncIncrementalFileOverwritesFolder(t *testing.T) { + ctx, assertSync := setupSyncTest(t, "--watch") // create foo/bar.txt localFilePath := filepath.Join(assertSync.localRoot, "foo/bar.txt") @@ -421,9 +414,8 @@ func TestAccSyncIncrementalFileOverwritesFolder(t *testing.T) { assertSync.snapshotContains(append(repoFiles, ".gitignore", "foo")) } -func TestAccSyncIncrementalSyncPythonNotebookToFile(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") +func TestSyncIncrementalSyncPythonNotebookToFile(t *testing.T) { + ctx, assertSync := setupSyncTest(t, "--watch") // create python notebook localFilePath := filepath.Join(assertSync.localRoot, "foo.py") @@ -452,9 +444,8 @@ func TestAccSyncIncrementalSyncPythonNotebookToFile(t *testing.T) { assertSync.snapshotContains(append(repoFiles, ".gitignore")) } -func TestAccSyncIncrementalSyncFileToPythonNotebook(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") +func TestSyncIncrementalSyncFileToPythonNotebook(t *testing.T) { + ctx, assertSync := setupSyncTest(t, "--watch") // create vanilla python file localFilePath := filepath.Join(assertSync.localRoot, "foo.py") @@ -476,9 +467,8 @@ func TestAccSyncIncrementalSyncFileToPythonNotebook(t *testing.T) { assertSync.snapshotContains(append(repoFiles, ".gitignore", "foo.py")) } -func TestAccSyncIncrementalSyncPythonNotebookDelete(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") +func TestSyncIncrementalSyncPythonNotebookDelete(t *testing.T) { + ctx, assertSync := setupSyncTest(t, "--watch") // create python notebook localFilePath := filepath.Join(assertSync.localRoot, "foo.py") @@ -498,17 +488,15 @@ func TestAccSyncIncrementalSyncPythonNotebookDelete(t *testing.T) { assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore")) } -func TestAccSyncEnsureRemotePathIsUsableIfRepoDoesntExist(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - wsc := databricks.Must(databricks.NewWorkspaceClient()) - ctx := context.Background() +func TestSyncEnsureRemotePathIsUsableIfRepoDoesntExist(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + wsc := wt.W me, err := wsc.CurrentUser.Me(ctx) require.NoError(t, err) // Hypothetical repo path doesn't exist. 
- nonExistingRepoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, RandomName("doesnt-exist-")) + nonExistingRepoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, testutil.RandomName("doesnt-exist-")) err = sync.EnsureRemotePathIsUsable(ctx, wsc, nonExistingRepoPath, nil) assert.ErrorContains(t, err, " does not exist; please create it first") @@ -518,11 +506,10 @@ func TestAccSyncEnsureRemotePathIsUsableIfRepoDoesntExist(t *testing.T) { assert.ErrorContains(t, err, " does not exist; please create it first") } -func TestAccSyncEnsureRemotePathIsUsableIfRepoExists(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestSyncEnsureRemotePathIsUsableIfRepoExists(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + wsc := wt.W - wsc := databricks.Must(databricks.NewWorkspaceClient()) - ctx := context.Background() _, remoteRepoPath := setupRepo(t, wsc, ctx) // Repo itself is usable. @@ -540,15 +527,14 @@ func TestAccSyncEnsureRemotePathIsUsableIfRepoExists(t *testing.T) { require.Equal(t, workspace.ObjectTypeDirectory, info.ObjectType) } -func TestAccSyncEnsureRemotePathIsUsableInWorkspace(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestSyncEnsureRemotePathIsUsableInWorkspace(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + wsc := wt.W - wsc := databricks.Must(databricks.NewWorkspaceClient()) - ctx := context.Background() me, err := wsc.CurrentUser.Me(ctx) require.NoError(t, err) - remotePath := fmt.Sprintf("/Users/%s/%s", me.UserName, RandomName("ensure-path-exists-test-")) + remotePath := fmt.Sprintf("/Users/%s/%s", me.UserName, testutil.RandomName("ensure-path-exists-test-")) err = sync.EnsureRemotePathIsUsable(ctx, wsc, remotePath, me) assert.NoError(t, err) diff --git a/internal/unknown_command_test.go b/integration/cmd/unknown_command_test.go similarity index 63% rename from internal/unknown_command_test.go rename to integration/cmd/unknown_command_test.go index 62b84027f..fd87a77ff 100644 --- a/internal/unknown_command_test.go +++ b/integration/cmd/unknown_command_test.go @@ -1,13 +1,16 @@ -package internal +package cmd_test import ( + "context" "testing" + "github.com/databricks/cli/internal/testcli" assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestUnknownCommand(t *testing.T) { - stdout, stderr, err := RequireErrorRun(t, "unknown-command") + ctx := context.Background() + stdout, stderr, err := testcli.RequireErrorRun(t, ctx, "unknown-command") assert.Error(t, err, "unknown command", `unknown command "unknown-command" for "databricks"`) assert.Equal(t, "", stdout.String()) diff --git a/internal/version_test.go b/integration/cmd/version/version_test.go similarity index 66% rename from internal/version_test.go rename to integration/cmd/version/version_test.go index 7dba63cd8..b12974d69 100644 --- a/internal/version_test.go +++ b/integration/cmd/version/version_test.go @@ -1,36 +1,42 @@ -package internal +package version_test import ( + "context" "encoding/json" "fmt" "testing" "github.com/databricks/cli/internal/build" + "github.com/databricks/cli/internal/testcli" "github.com/stretchr/testify/assert" ) var expectedVersion = fmt.Sprintf("Databricks CLI v%s\n", build.GetInfo().Version) func TestVersionFlagShort(t *testing.T) { - stdout, stderr := RequireSuccessfulRun(t, "-v") + ctx := context.Background() + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "-v") assert.Equal(t, expectedVersion, stdout.String()) assert.Equal(t, "", stderr.String()) } func TestVersionFlagLong(t *testing.T) { - stdout, stderr := RequireSuccessfulRun(t, 
"--version") + ctx := context.Background() + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "--version") assert.Equal(t, expectedVersion, stdout.String()) assert.Equal(t, "", stderr.String()) } func TestVersionCommand(t *testing.T) { - stdout, stderr := RequireSuccessfulRun(t, "version") + ctx := context.Background() + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "version") assert.Equal(t, expectedVersion, stdout.String()) assert.Equal(t, "", stderr.String()) } func TestVersionCommandWithJSONOutput(t *testing.T) { - stdout, stderr := RequireSuccessfulRun(t, "version", "--output", "json") + ctx := context.Background() + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "version", "--output", "json") assert.NotEmpty(t, stdout.String()) assert.Equal(t, "", stderr.String()) diff --git a/internal/testdata/import_dir/a/b/c/file-b b/integration/cmd/workspace/testdata/import_dir/a/b/c/file-b similarity index 100% rename from internal/testdata/import_dir/a/b/c/file-b rename to integration/cmd/workspace/testdata/import_dir/a/b/c/file-b diff --git a/internal/testdata/import_dir/file-a b/integration/cmd/workspace/testdata/import_dir/file-a similarity index 100% rename from internal/testdata/import_dir/file-a rename to integration/cmd/workspace/testdata/import_dir/file-a diff --git a/internal/testdata/import_dir/jupyterNotebook.ipynb b/integration/cmd/workspace/testdata/import_dir/jupyterNotebook.ipynb similarity index 100% rename from internal/testdata/import_dir/jupyterNotebook.ipynb rename to integration/cmd/workspace/testdata/import_dir/jupyterNotebook.ipynb diff --git a/internal/testdata/import_dir/pyNotebook.py b/integration/cmd/workspace/testdata/import_dir/pyNotebook.py similarity index 100% rename from internal/testdata/import_dir/pyNotebook.py rename to integration/cmd/workspace/testdata/import_dir/pyNotebook.py diff --git a/internal/testdata/import_dir/rNotebook.r b/integration/cmd/workspace/testdata/import_dir/rNotebook.r similarity index 100% rename from internal/testdata/import_dir/rNotebook.r rename to integration/cmd/workspace/testdata/import_dir/rNotebook.r diff --git a/internal/testdata/import_dir/scalaNotebook.scala b/integration/cmd/workspace/testdata/import_dir/scalaNotebook.scala similarity index 100% rename from internal/testdata/import_dir/scalaNotebook.scala rename to integration/cmd/workspace/testdata/import_dir/scalaNotebook.scala diff --git a/internal/testdata/import_dir/sqlNotebook.sql b/integration/cmd/workspace/testdata/import_dir/sqlNotebook.sql similarity index 100% rename from internal/testdata/import_dir/sqlNotebook.sql rename to integration/cmd/workspace/testdata/import_dir/sqlNotebook.sql diff --git a/internal/workspace_test.go b/integration/cmd/workspace/workspace_test.go similarity index 76% rename from internal/workspace_test.go rename to integration/cmd/workspace/workspace_test.go index 445361654..c376a87d2 100644 --- a/internal/workspace_test.go +++ b/integration/cmd/workspace/workspace_test.go @@ -1,4 +1,4 @@ -package internal +package workspace_test import ( "context" @@ -11,18 +11,17 @@ import ( "strings" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestAccWorkspaceList(t *testing.T) { - 
t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - stdout, stderr := RequireSuccessfulRun(t, "workspace", "list", "/") +func TestWorkspaceList(t *testing.T) { + ctx := context.Background() + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "workspace", "list", "/") outStr := stdout.String() assert.Contains(t, outStr, "ID") assert.Contains(t, outStr, "Type") @@ -32,21 +31,22 @@ func TestAccWorkspaceList(t *testing.T) { } func TestWorkpaceListErrorWhenNoArguments(t *testing.T) { - _, _, err := RequireErrorRun(t, "workspace", "list") + ctx := context.Background() + _, _, err := testcli.RequireErrorRun(t, ctx, "workspace", "list") assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } func TestWorkpaceGetStatusErrorWhenNoArguments(t *testing.T) { - _, _, err := RequireErrorRun(t, "workspace", "get-status") + ctx := context.Background() + _, _, err := testcli.RequireErrorRun(t, ctx, "workspace", "get-status") assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } -func TestAccWorkpaceExportPrintsContents(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestWorkpaceExportPrintsContents(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W - ctx := context.Background() - w := databricks.Must(databricks.NewWorkspaceClient()) - tmpdir := TemporaryWorkspaceDir(t, w) + tmpdir := acc.TemporaryWorkspaceDir(wt, "workspace-export-") f, err := filer.NewWorkspaceFilesClient(w, tmpdir) require.NoError(t, err) @@ -56,29 +56,30 @@ func TestAccWorkpaceExportPrintsContents(t *testing.T) { require.NoError(t, err) // Run export - stdout, stderr := RequireSuccessfulRun(t, "workspace", "export", path.Join(tmpdir, "file-a")) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "workspace", "export", path.Join(tmpdir, "file-a")) assert.Equal(t, contents, stdout.String()) assert.Equal(t, "", stderr.String()) } func setupWorkspaceImportExportTest(t *testing.T) (context.Context, filer.Filer, string) { ctx, wt := acc.WorkspaceTest(t) + w := wt.W - tmpdir := TemporaryWorkspaceDir(t, wt.W) - f, err := filer.NewWorkspaceFilesClient(wt.W, tmpdir) + tmpdir := acc.TemporaryWorkspaceDir(wt, "workspace-import-") + f, err := filer.NewWorkspaceFilesClient(w, tmpdir) require.NoError(t, err) return ctx, f, tmpdir } -func assertLocalFileContents(t *testing.T, path string, content string) { +func assertLocalFileContents(t *testing.T, path, content string) { require.FileExists(t, path) b, err := os.ReadFile(path) require.NoError(t, err) assert.Contains(t, string(b), content) } -func assertFilerFileContents(t *testing.T, ctx context.Context, f filer.Filer, path string, content string) { +func assertFilerFileContents(t *testing.T, ctx context.Context, f filer.Filer, path, content string) { r, err := f.Read(ctx, path) require.NoError(t, err) b, err := io.ReadAll(r) @@ -92,7 +93,7 @@ func assertWorkspaceFileType(t *testing.T, ctx context.Context, f filer.Filer, p assert.Equal(t, fileType, info.Sys().(workspace.ObjectInfo).ObjectType) } -func TestAccExportDir(t *testing.T) { +func TestExportDir(t *testing.T) { ctx, f, sourceDir := setupWorkspaceImportExportTest(t) targetDir := t.TempDir() @@ -113,7 +114,7 @@ func TestAccExportDir(t *testing.T) { require.NoError(t, err) expectedLogs := strings.Join([]string{ - fmt.Sprintf("Exporting files from %s", sourceDir), + "Exporting files from " + sourceDir, fmt.Sprintf("%s -> %s", path.Join(sourceDir, "a/b/c/file-b"), filepath.Join(targetDir, "a/b/c/file-b")), fmt.Sprintf("%s -> %s", path.Join(sourceDir, "file-a"), filepath.Join(targetDir, "file-a")), 
fmt.Sprintf("%s -> %s", path.Join(sourceDir, "pyNotebook"), filepath.Join(targetDir, "pyNotebook.py")), @@ -124,7 +125,7 @@ func TestAccExportDir(t *testing.T) { }, "\n") // Run Export - stdout, stderr := RequireSuccessfulRun(t, "workspace", "export-dir", sourceDir, targetDir) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "workspace", "export-dir", sourceDir, targetDir) assert.Equal(t, expectedLogs, stdout.String()) assert.Equal(t, "", stderr.String()) @@ -137,7 +138,7 @@ func TestAccExportDir(t *testing.T) { assertLocalFileContents(t, filepath.Join(targetDir, "a/b/c/file-b"), "def") } -func TestAccExportDirDoesNotOverwrite(t *testing.T) { +func TestExportDirDoesNotOverwrite(t *testing.T) { ctx, f, sourceDir := setupWorkspaceImportExportTest(t) targetDir := t.TempDir() @@ -152,13 +153,13 @@ func TestAccExportDirDoesNotOverwrite(t *testing.T) { require.NoError(t, err) // Run Export - RequireSuccessfulRun(t, "workspace", "export-dir", sourceDir, targetDir) + testcli.RequireSuccessfulRun(t, ctx, "workspace", "export-dir", sourceDir, targetDir) // Assert file is not overwritten assertLocalFileContents(t, filepath.Join(targetDir, "file-a"), "local content") } -func TestAccExportDirWithOverwriteFlag(t *testing.T) { +func TestExportDirWithOverwriteFlag(t *testing.T) { ctx, f, sourceDir := setupWorkspaceImportExportTest(t) targetDir := t.TempDir() @@ -173,18 +174,18 @@ func TestAccExportDirWithOverwriteFlag(t *testing.T) { require.NoError(t, err) // Run Export - RequireSuccessfulRun(t, "workspace", "export-dir", sourceDir, targetDir, "--overwrite") + testcli.RequireSuccessfulRun(t, ctx, "workspace", "export-dir", sourceDir, targetDir, "--overwrite") // Assert file has been overwritten assertLocalFileContents(t, filepath.Join(targetDir, "file-a"), "content from workspace") } -func TestAccImportDir(t *testing.T) { +func TestImportDir(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) - stdout, stderr := RequireSuccessfulRun(t, "workspace", "import-dir", "./testdata/import_dir", targetDir, "--log-level=debug") + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "workspace", "import-dir", "./testdata/import_dir", targetDir, "--log-level=debug") expectedLogs := strings.Join([]string{ - fmt.Sprintf("Importing files from %s", "./testdata/import_dir"), + "Importing files from " + "./testdata/import_dir", fmt.Sprintf("%s -> %s", filepath.FromSlash("a/b/c/file-b"), path.Join(targetDir, "a/b/c/file-b")), fmt.Sprintf("%s -> %s", filepath.FromSlash("file-a"), path.Join(targetDir, "file-a")), fmt.Sprintf("%s -> %s", filepath.FromSlash("jupyterNotebook.ipynb"), path.Join(targetDir, "jupyterNotebook")), @@ -208,7 +209,7 @@ func TestAccImportDir(t *testing.T) { assertFilerFileContents(t, ctx, workspaceFiler, "jupyterNotebook", "# Databricks notebook source\nprint(\"jupyter\")") } -func TestAccImportDirDoesNotOverwrite(t *testing.T) { +func TestImportDirDoesNotOverwrite(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) var err error @@ -222,7 +223,7 @@ func TestAccImportDirDoesNotOverwrite(t *testing.T) { assertFilerFileContents(t, ctx, workspaceFiler, "file-a", "old file") assertFilerFileContents(t, ctx, workspaceFiler, "pyNotebook", "# Databricks notebook source\nprint(\"old notebook\")") - RequireSuccessfulRun(t, "workspace", "import-dir", "./testdata/import_dir", targetDir) + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import-dir", "./testdata/import_dir", targetDir) // Assert files are imported assertFilerFileContents(t, 
ctx, workspaceFiler, "a/b/c/file-b", "file-in-dir") @@ -236,7 +237,7 @@ func TestAccImportDirDoesNotOverwrite(t *testing.T) { assertFilerFileContents(t, ctx, workspaceFiler, "pyNotebook", "# Databricks notebook source\nprint(\"old notebook\")") } -func TestAccImportDirWithOverwriteFlag(t *testing.T) { +func TestImportDirWithOverwriteFlag(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) var err error @@ -250,7 +251,7 @@ func TestAccImportDirWithOverwriteFlag(t *testing.T) { assertFilerFileContents(t, ctx, workspaceFiler, "file-a", "old file") assertFilerFileContents(t, ctx, workspaceFiler, "pyNotebook", "# Databricks notebook source\nprint(\"old notebook\")") - RequireSuccessfulRun(t, "workspace", "import-dir", "./testdata/import_dir", targetDir, "--overwrite") + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import-dir", "./testdata/import_dir", targetDir, "--overwrite") // Assert files are imported assertFilerFileContents(t, ctx, workspaceFiler, "a/b/c/file-b", "file-in-dir") @@ -264,7 +265,7 @@ func TestAccImportDirWithOverwriteFlag(t *testing.T) { assertFilerFileContents(t, ctx, workspaceFiler, "pyNotebook", "# Databricks notebook source\nprint(\"python\")") } -func TestAccExport(t *testing.T) { +func TestExport(t *testing.T) { ctx, f, sourceDir := setupWorkspaceImportExportTest(t) var err error @@ -272,7 +273,7 @@ func TestAccExport(t *testing.T) { // Export vanilla file err = f.Write(ctx, "file-a", strings.NewReader("abc")) require.NoError(t, err) - stdout, _ := RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "file-a")) + stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "workspace", "export", path.Join(sourceDir, "file-a")) b, err := io.ReadAll(&stdout) require.NoError(t, err) assert.Equal(t, "abc", string(b)) @@ -280,20 +281,20 @@ func TestAccExport(t *testing.T) { // Export python notebook err = f.Write(ctx, "pyNotebook.py", strings.NewReader("# Databricks notebook source")) require.NoError(t, err) - stdout, _ = RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook")) + stdout, _ = testcli.RequireSuccessfulRun(t, ctx, "workspace", "export", path.Join(sourceDir, "pyNotebook")) b, err = io.ReadAll(&stdout) require.NoError(t, err) assert.Equal(t, "# Databricks notebook source\n", string(b)) // Export python notebook as jupyter - stdout, _ = RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--format", "JUPYTER") + stdout, _ = testcli.RequireSuccessfulRun(t, ctx, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--format", "JUPYTER") b, err = io.ReadAll(&stdout) require.NoError(t, err) assert.Contains(t, string(b), `"cells":`, "jupyter notebooks contain the cells field") assert.Contains(t, string(b), `"metadata":`, "jupyter notebooks contain the metadata field") } -func TestAccExportWithFileFlag(t *testing.T) { +func TestExportWithFileFlag(t *testing.T) { ctx, f, sourceDir := setupWorkspaceImportExportTest(t) localTmpDir := t.TempDir() @@ -302,7 +303,7 @@ func TestAccExportWithFileFlag(t *testing.T) { // Export vanilla file err = f.Write(ctx, "file-a", strings.NewReader("abc")) require.NoError(t, err) - stdout, _ := RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "file-a"), "--file", filepath.Join(localTmpDir, "file.txt")) + stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "workspace", "export", path.Join(sourceDir, "file-a"), "--file", filepath.Join(localTmpDir, "file.txt")) b, err := io.ReadAll(&stdout) require.NoError(t, err) // Expect 
nothing to be printed to stdout @@ -312,14 +313,14 @@ func TestAccExportWithFileFlag(t *testing.T) { // Export python notebook err = f.Write(ctx, "pyNotebook.py", strings.NewReader("# Databricks notebook source")) require.NoError(t, err) - stdout, _ = RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--file", filepath.Join(localTmpDir, "pyNb.py")) + stdout, _ = testcli.RequireSuccessfulRun(t, ctx, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--file", filepath.Join(localTmpDir, "pyNb.py")) b, err = io.ReadAll(&stdout) require.NoError(t, err) assert.Equal(t, "", string(b)) assertLocalFileContents(t, filepath.Join(localTmpDir, "pyNb.py"), "# Databricks notebook source\n") // Export python notebook as jupyter - stdout, _ = RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--format", "JUPYTER", "--file", filepath.Join(localTmpDir, "jupyterNb.ipynb")) + stdout, _ = testcli.RequireSuccessfulRun(t, ctx, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--format", "JUPYTER", "--file", filepath.Join(localTmpDir, "jupyterNb.ipynb")) b, err = io.ReadAll(&stdout) require.NoError(t, err) assert.Equal(t, "", string(b)) @@ -327,75 +328,75 @@ func TestAccExportWithFileFlag(t *testing.T) { assertLocalFileContents(t, filepath.Join(localTmpDir, "jupyterNb.ipynb"), `"metadata":`) } -func TestAccImportFileUsingContentFormatSource(t *testing.T) { +func TestImportFileUsingContentFormatSource(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) // Content = `print(1)`. Uploaded as a notebook by default - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "pyScript"), + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "pyScript"), "--content", base64.StdEncoding.EncodeToString([]byte("print(1)")), "--language=PYTHON") assertFilerFileContents(t, ctx, workspaceFiler, "pyScript", "print(1)") assertWorkspaceFileType(t, ctx, workspaceFiler, "pyScript", workspace.ObjectTypeNotebook) // Import with content = `# Databricks notebook source\nprint(1)`. Uploaded as a notebook with the content just being print(1) - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "pyNb"), + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "pyNb"), "--content", base64.StdEncoding.EncodeToString([]byte("`# Databricks notebook source\nprint(1)")), "--language=PYTHON") assertFilerFileContents(t, ctx, workspaceFiler, "pyNb", "print(1)") assertWorkspaceFileType(t, ctx, workspaceFiler, "pyNb", workspace.ObjectTypeNotebook) } -func TestAccImportFileUsingContentFormatAuto(t *testing.T) { +func TestImportFileUsingContentFormatAuto(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) // Content = `# Databricks notebook source\nprint(1)`. Upload as file if path has no extension. - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-file"), + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "py-nb-as-file"), "--content", base64.StdEncoding.EncodeToString([]byte("`# Databricks notebook source\nprint(1)")), "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-file", "# Databricks notebook source\nprint(1)") assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-file", workspace.ObjectTypeFile) // Content = `# Databricks notebook source\nprint(1)`. 
Upload as notebook if path has py extension - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-notebook.py"), + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "py-nb-as-notebook.py"), "--content", base64.StdEncoding.EncodeToString([]byte("`# Databricks notebook source\nprint(1)")), "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-notebook", "# Databricks notebook source\nprint(1)") assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-notebook", workspace.ObjectTypeNotebook) // Content = `print(1)`. Upload as file if content is not notebook (even if path has .py extension) - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "not-a-notebook.py"), "--content", + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "not-a-notebook.py"), "--content", base64.StdEncoding.EncodeToString([]byte("print(1)")), "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "not-a-notebook.py", "print(1)") assertWorkspaceFileType(t, ctx, workspaceFiler, "not-a-notebook.py", workspace.ObjectTypeFile) } -func TestAccImportFileFormatSource(t *testing.T) { +func TestImportFileFormatSource(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "pyNotebook"), "--file", "./testdata/import_dir/pyNotebook.py", "--language=PYTHON") + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "pyNotebook"), "--file", "./testdata/import_dir/pyNotebook.py", "--language=PYTHON") assertFilerFileContents(t, ctx, workspaceFiler, "pyNotebook", "# Databricks notebook source\nprint(\"python\")") assertWorkspaceFileType(t, ctx, workspaceFiler, "pyNotebook", workspace.ObjectTypeNotebook) - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "scalaNotebook"), "--file", "./testdata/import_dir/scalaNotebook.scala", "--language=SCALA") + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "scalaNotebook"), "--file", "./testdata/import_dir/scalaNotebook.scala", "--language=SCALA") assertFilerFileContents(t, ctx, workspaceFiler, "scalaNotebook", "// Databricks notebook source\nprintln(\"scala\")") assertWorkspaceFileType(t, ctx, workspaceFiler, "scalaNotebook", workspace.ObjectTypeNotebook) - _, _, err := RequireErrorRun(t, "workspace", "import", path.Join(targetDir, "scalaNotebook"), "--file", "./testdata/import_dir/scalaNotebook.scala") + _, _, err := testcli.RequireErrorRun(t, ctx, "workspace", "import", path.Join(targetDir, "scalaNotebook"), "--file", "./testdata/import_dir/scalaNotebook.scala") assert.ErrorContains(t, err, "The zip file may not be valid or may be an unsupported version. Hint: Objects imported using format=SOURCE are expected to be zip encoded databricks source notebook(s) by default. 
Please specify a language using the --language flag if you are trying to import a single uncompressed notebook") } -func TestAccImportFileFormatAuto(t *testing.T) { +func TestImportFileFormatAuto(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) // Upload as file if path has no extension - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-file"), "--file", "./testdata/import_dir/pyNotebook.py", "--format=AUTO") + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "py-nb-as-file"), "--file", "./testdata/import_dir/pyNotebook.py", "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-file", "# Databricks notebook source") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-file", "print(\"python\")") assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-file", workspace.ObjectTypeFile) // Upload as notebook if path has extension - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-notebook.py"), "--file", "./testdata/import_dir/pyNotebook.py", "--format=AUTO") + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "py-nb-as-notebook.py"), "--file", "./testdata/import_dir/pyNotebook.py", "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-notebook", "# Databricks notebook source\nprint(\"python\")") assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-notebook", workspace.ObjectTypeNotebook) // Upload as file if content is not notebook (even if path has .py extension) - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "not-a-notebook.py"), "--file", "./testdata/import_dir/file-a", "--format=AUTO") + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "not-a-notebook.py"), "--file", "./testdata/import_dir/file-a", "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "not-a-notebook.py", "hello, world") assertWorkspaceFileType(t, ctx, workspaceFiler, "not-a-notebook.py", workspace.ObjectTypeFile) } diff --git a/internal/acc/debug.go b/integration/internal/acc/debug.go similarity index 84% rename from internal/acc/debug.go rename to integration/internal/acc/debug.go index 116631132..08e385b09 100644 --- a/internal/acc/debug.go +++ b/integration/internal/acc/debug.go @@ -6,18 +6,19 @@ import ( "path" "path/filepath" "strings" - "testing" + + "github.com/databricks/cli/internal/testutil" ) // Detects if test is run from "debug test" feature in VS Code. -func isInDebug() bool { +func IsInDebug() bool { ex, _ := os.Executable() return strings.HasPrefix(path.Base(ex), "__debug_bin") } // Loads debug environment from ~/.databricks/debug-env.json. 
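Editor's note on the call sites above: the runner helpers now come from the internal testcli package and take a context as their second argument, while still returning the captured output buffers. A minimal sketch of the new call shape, assuming the helpers are importable as github.com/databricks/cli/internal/testcli (the call sites above only show the testcli identifier); the package name, test name, and CLI arguments below are illustrative, not part of this change.

package workspace_test

import (
	"context"
	"testing"

	"github.com/databricks/cli/internal/testcli"
	"github.com/stretchr/testify/require"
)

func TestRunnerCallShapeSketch(t *testing.T) {
	ctx := context.Background()

	// New shape: the context is passed explicitly instead of being created inside the helper.
	stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "workspace", "list", "/")
	require.NotEmpty(t, stdout.String())

	// The error variant returns the captured output plus the error for further assertions.
	_, _, err := testcli.RequireErrorRun(t, ctx, "workspace", "export", "/path/that/does/not/exist")
	require.Error(t, err)
}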
-func loadDebugEnvIfRunFromIDE(t *testing.T, key string) { - if !isInDebug() { +func loadDebugEnvIfRunFromIDE(t testutil.TestingT, key string) { + if !IsInDebug() { return } home, err := os.UserHomeDir() diff --git a/integration/internal/acc/fixtures.go b/integration/internal/acc/fixtures.go new file mode 100644 index 000000000..2367d228f --- /dev/null +++ b/integration/internal/acc/fixtures.go @@ -0,0 +1,133 @@ +package acc + +import ( + "fmt" + + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/databricks-sdk-go/service/files" + "github.com/databricks/databricks-sdk-go/service/workspace" + "github.com/stretchr/testify/require" +) + +func TemporaryWorkspaceDir(t *WorkspaceT, name ...string) string { + ctx := t.ctx + me, err := t.W.CurrentUser.Me(ctx) + require.NoError(t, err) + + // Prefix the name with "integration-test-" to make it easier to identify. + name = append([]string{"integration-test-"}, name...) + basePath := fmt.Sprintf("/Users/%s/%s", me.UserName, testutil.RandomName(name...)) + + t.Logf("Creating workspace directory %s", basePath) + err = t.W.Workspace.MkdirsByPath(ctx, basePath) + require.NoError(t, err) + + // Remove test directory on test completion. + t.Cleanup(func() { + t.Logf("Removing workspace directory %s", basePath) + err := t.W.Workspace.Delete(ctx, workspace.Delete{ + Path: basePath, + Recursive: true, + }) + if err == nil || apierr.IsMissing(err) { + return + } + t.Logf("Unable to remove temporary workspace directory %s: %#v", basePath, err) + }) + + return basePath +} + +func TemporaryDbfsDir(t *WorkspaceT, name ...string) string { + ctx := t.ctx + + // Prefix the name with "integration-test-" to make it easier to identify. + name = append([]string{"integration-test-"}, name...) + path := "/tmp/" + testutil.RandomName(name...) + + t.Logf("Creating DBFS directory %s", path) + err := t.W.Dbfs.MkdirsByPath(ctx, path) + require.NoError(t, err) + + t.Cleanup(func() { + t.Logf("Removing DBFS directory %s", path) + err := t.W.Dbfs.Delete(ctx, files.Delete{ + Path: path, + Recursive: true, + }) + if err == nil || apierr.IsMissing(err) { + return + } + t.Logf("Unable to remove temporary DBFS directory %s: %#v", path, err) + }) + + return path +} + +func TemporaryRepo(t *WorkspaceT, url string) string { + ctx := t.ctx + me, err := t.W.CurrentUser.Me(ctx) + require.NoError(t, err) + + // Prefix the path with "integration-test-" to make it easier to identify. + path := fmt.Sprintf("/Repos/%s/%s", me.UserName, testutil.RandomName("integration-test-")) + + t.Logf("Creating repo: %s", path) + resp, err := t.W.Repos.Create(ctx, workspace.CreateRepoRequest{ + Url: url, + Path: path, + Provider: "gitHub", + }) + require.NoError(t, err) + + t.Cleanup(func() { + t.Logf("Removing repo: %s", path) + err := t.W.Repos.Delete(ctx, workspace.DeleteRepoRequest{ + RepoId: resp.Id, + }) + if err == nil || apierr.IsMissing(err) { + return + } + t.Logf("Unable to remove repo %s: %#v", path, err) + }) + + return path +} + +// Create a new Unity Catalog volume in a catalog called "main" in the workspace. 
+func TemporaryVolume(t *WorkspaceT) string { + ctx := t.ctx + w := t.W + + // Create a schema + schema, err := w.Schemas.Create(ctx, catalog.CreateSchema{ + CatalogName: "main", + Name: testutil.RandomName("test-schema-"), + }) + require.NoError(t, err) + t.Cleanup(func() { + err := w.Schemas.Delete(ctx, catalog.DeleteSchemaRequest{ + FullName: schema.FullName, + }) + require.NoError(t, err) + }) + + // Create a volume + volume, err := w.Volumes.Create(ctx, catalog.CreateVolumeRequestContent{ + CatalogName: "main", + SchemaName: schema.Name, + Name: "my-volume", + VolumeType: catalog.VolumeTypeManaged, + }) + require.NoError(t, err) + t.Cleanup(func() { + err := w.Volumes.Delete(ctx, catalog.DeleteVolumeRequest{ + Name: volume.FullName, + }) + require.NoError(t, err) + }) + + return fmt.Sprintf("/Volumes/%s/%s/%s", "main", schema.Name, volume.Name) +} diff --git a/internal/acc/workspace.go b/integration/internal/acc/workspace.go similarity index 57% rename from internal/acc/workspace.go rename to integration/internal/acc/workspace.go index 69ab0e715..64deda7c1 100644 --- a/internal/acc/workspace.go +++ b/integration/internal/acc/workspace.go @@ -2,19 +2,16 @@ package acc import ( "context" - "fmt" "os" - "testing" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go" - "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/service/compute" - "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/stretchr/testify/require" ) type WorkspaceT struct { - *testing.T + testutil.TestingT W *databricks.WorkspaceClient @@ -23,16 +20,17 @@ type WorkspaceT struct { exec *compute.CommandExecutorV2 } -func WorkspaceTest(t *testing.T) (context.Context, *WorkspaceT) { +func WorkspaceTest(t testutil.TestingT) (context.Context, *WorkspaceT) { + t.Helper() loadDebugEnvIfRunFromIDE(t, "workspace") - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Logf("CLOUD_ENV=%s", testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) w, err := databricks.NewWorkspaceClient() require.NoError(t, err) wt := &WorkspaceT{ - T: t, + TestingT: t, W: w, @@ -43,10 +41,11 @@ func WorkspaceTest(t *testing.T) (context.Context, *WorkspaceT) { } // Run the workspace test only on UC workspaces. -func UcWorkspaceTest(t *testing.T) (context.Context, *WorkspaceT) { +func UcWorkspaceTest(t testutil.TestingT) (context.Context, *WorkspaceT) { + t.Helper() loadDebugEnvIfRunFromIDE(t, "workspace") - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Logf("CLOUD_ENV=%s", testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) if os.Getenv("TEST_METASTORE_ID") == "" { t.Skipf("Skipping on non-UC workspaces") @@ -59,7 +58,7 @@ func UcWorkspaceTest(t *testing.T) (context.Context, *WorkspaceT) { require.NoError(t, err) wt := &WorkspaceT{ - T: t, + TestingT: t, W: w, @@ -70,19 +69,21 @@ func UcWorkspaceTest(t *testing.T) (context.Context, *WorkspaceT) { } func (t *WorkspaceT) TestClusterID() string { - clusterID := GetEnvOrSkipTest(t.T, "TEST_BRICKS_CLUSTER_ID") + t.Helper() + clusterID := testutil.GetEnvOrSkipTest(t, "TEST_BRICKS_CLUSTER_ID") err := t.W.Clusters.EnsureClusterIsRunning(t.ctx, clusterID) - require.NoError(t, err) + require.NoError(t, err, "Unexpected error from EnsureClusterIsRunning for clusterID=%s", clusterID) return clusterID } func (t *WorkspaceT) RunPython(code string) (string, error) { + t.Helper() var err error // Create command executor only once per test. 
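Usage note for the fixtures added in integration/internal/acc/fixtures.go above: they take the *WorkspaceT wrapper instead of *testing.T and register their own t.Cleanup, so callers no longer delete anything themselves. A minimal sketch of the intended call pattern; the test name, directory prefix, and file contents are illustrative only.

package acc_test

import (
	"strings"
	"testing"

	"github.com/databricks/cli/integration/internal/acc"
	"github.com/databricks/cli/libs/filer"
	"github.com/stretchr/testify/require"
)

func TestTemporaryWorkspaceDirSketch(t *testing.T) {
	ctx, wt := acc.WorkspaceTest(t)

	// Created under /Users/<current user>/integration-test-...; removed automatically on cleanup.
	dir := acc.TemporaryWorkspaceDir(wt, "sketch-")

	f, err := filer.NewWorkspaceFilesClient(wt.W, dir)
	require.NoError(t, err)

	// Anything written here disappears together with the directory when the test ends.
	err = f.Write(ctx, "hello.txt", strings.NewReader("hello world"))
	require.NoError(t, err)
}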
if t.exec == nil { t.exec, err = t.W.CommandExecution.Start(t.ctx, t.TestClusterID(), compute.LanguagePython) - require.NoError(t, err) + require.NoError(t, err, "Unexpected error from CommandExecution.Start(clusterID=%v)", t.TestClusterID()) t.Cleanup(func() { err := t.exec.Destroy(t.ctx) @@ -91,36 +92,9 @@ func (t *WorkspaceT) RunPython(code string) (string, error) { } results, err := t.exec.Execute(t.ctx, code) - require.NoError(t, err) + require.NoError(t, err, "Unexpected error from Execute(%v)", code) require.NotEqual(t, compute.ResultTypeError, results.ResultType, results.Cause) output, ok := results.Data.(string) require.True(t, ok, "unexpected type %T", results.Data) return output, nil } - -func (t *WorkspaceT) TemporaryWorkspaceDir(name ...string) string { - ctx := context.Background() - me, err := t.W.CurrentUser.Me(ctx) - require.NoError(t, err) - - basePath := fmt.Sprintf("/Users/%s/%s", me.UserName, RandomName(name...)) - - t.Logf("Creating %s", basePath) - err = t.W.Workspace.MkdirsByPath(ctx, basePath) - require.NoError(t, err) - - // Remove test directory on test completion. - t.Cleanup(func() { - t.Logf("Removing %s", basePath) - err := t.W.Workspace.Delete(ctx, workspace.Delete{ - Path: basePath, - Recursive: true, - }) - if err == nil || apierr.IsMissing(err) { - return - } - t.Logf("Unable to remove temporary workspace directory %s: %#v", basePath, err) - }) - - return basePath -} diff --git a/internal/filer_test.go b/integration/libs/filer/filer_test.go similarity index 85% rename from internal/filer_test.go rename to integration/libs/filer/filer_test.go index 4e6a15671..21c839e1b 100644 --- a/internal/filer_test.go +++ b/integration/libs/filer/filer_test.go @@ -1,17 +1,16 @@ -package internal +package filer_test import ( "bytes" "context" "encoding/json" - "errors" "io" "io/fs" "path" - "regexp" "strings" "testing" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -22,7 +21,7 @@ type filerTest struct { filer.Filer } -func (f filerTest) assertContents(ctx context.Context, name string, contents string) { +func (f filerTest) assertContents(ctx context.Context, name, contents string) { reader, err := f.Read(ctx, name) if !assert.NoError(f, err) { return @@ -39,7 +38,7 @@ func (f filerTest) assertContents(ctx context.Context, name string, contents str assert.Equal(f, contents, body.String()) } -func (f filerTest) assertContentsJupyter(ctx context.Context, name string, language string) { +func (f filerTest) assertContentsJupyter(ctx context.Context, name, language string) { reader, err := f.Read(ctx, name) if !assert.NoError(f, err) { return @@ -105,7 +104,7 @@ func commonFilerRecursiveDeleteTest(t *testing.T, ctx context.Context, f filer.F for _, e := range entriesBeforeDelete { names = append(names, e.Name()) } - assert.Equal(t, names, []string{"file1", "file2", "subdir1", "subdir2"}) + assert.Equal(t, []string{"file1", "file2", "subdir1", "subdir2"}, names) err = f.Delete(ctx, "dir") assert.ErrorAs(t, err, &filer.DirectoryNotEmptyError{}) @@ -116,12 +115,12 @@ func commonFilerRecursiveDeleteTest(t *testing.T, ctx context.Context, f filer.F assert.ErrorAs(t, err, &filer.NoSuchDirectoryError{}) } -func TestAccFilerRecursiveDelete(t *testing.T) { +func TestFilerRecursiveDelete(t *testing.T) { t.Parallel() for _, testCase := range []struct { name string - f func(t *testing.T) (filer.Filer, string) + f func(t testutil.TestingT) (filer.Filer, string) }{ {"local", 
setupLocalFiler}, {"workspace files", setupWsfsFiler}, @@ -148,13 +147,13 @@ func commonFilerReadWriteTests(t *testing.T, ctx context.Context, f filer.Filer) // Write should fail because the intermediate directory doesn't exist. err = f.Write(ctx, "/foo/bar", strings.NewReader(`hello world`)) - assert.True(t, errors.As(err, &filer.NoSuchDirectoryError{})) - assert.True(t, errors.Is(err, fs.ErrNotExist)) + assert.ErrorAs(t, err, &filer.NoSuchDirectoryError{}) + assert.ErrorIs(t, err, fs.ErrNotExist) // Read should fail because the intermediate directory doesn't yet exist. _, err = f.Read(ctx, "/foo/bar") - assert.True(t, errors.As(err, &filer.FileDoesNotExistError{})) - assert.True(t, errors.Is(err, fs.ErrNotExist)) + assert.ErrorAs(t, err, &filer.FileDoesNotExistError{}) + assert.ErrorIs(t, err, fs.ErrNotExist) // Read should fail because the path points to a directory err = f.Mkdir(ctx, "/dir") @@ -169,8 +168,8 @@ func commonFilerReadWriteTests(t *testing.T, ctx context.Context, f filer.Filer) // Write should fail because there is an existing file at the specified path. err = f.Write(ctx, "/foo/bar", strings.NewReader(`hello universe`)) - assert.True(t, errors.As(err, &filer.FileAlreadyExistsError{})) - assert.True(t, errors.Is(err, fs.ErrExist)) + assert.ErrorAs(t, err, &filer.FileAlreadyExistsError{}) + assert.ErrorIs(t, err, fs.ErrExist) // Write with OverwriteIfExists should succeed. err = f.Write(ctx, "/foo/bar", strings.NewReader(`hello universe`), filer.OverwriteIfExists) @@ -187,7 +186,7 @@ func commonFilerReadWriteTests(t *testing.T, ctx context.Context, f filer.Filer) require.NoError(t, err) assert.Equal(t, "foo", info.Name()) assert.True(t, info.Mode().IsDir()) - assert.Equal(t, true, info.IsDir()) + assert.True(t, info.IsDir()) // Stat on a file should succeed. // Note: size and modification time behave differently between backends. @@ -195,17 +194,17 @@ func commonFilerReadWriteTests(t *testing.T, ctx context.Context, f filer.Filer) require.NoError(t, err) assert.Equal(t, "bar", info.Name()) assert.True(t, info.Mode().IsRegular()) - assert.Equal(t, false, info.IsDir()) + assert.False(t, info.IsDir()) // Delete should fail if the file doesn't exist. err = f.Delete(ctx, "/doesnt_exist") assert.ErrorAs(t, err, &filer.FileDoesNotExistError{}) - assert.True(t, errors.Is(err, fs.ErrNotExist)) + assert.ErrorIs(t, err, fs.ErrNotExist) // Stat should fail if the file doesn't exist. _, err = f.Stat(ctx, "/doesnt_exist") assert.ErrorAs(t, err, &filer.FileDoesNotExistError{}) - assert.True(t, errors.Is(err, fs.ErrNotExist)) + assert.ErrorIs(t, err, fs.ErrNotExist) // Delete should succeed for file that does exist. err = f.Delete(ctx, "/foo/bar") @@ -214,7 +213,7 @@ func commonFilerReadWriteTests(t *testing.T, ctx context.Context, f filer.Filer) // Delete should fail for a non-empty directory. err = f.Delete(ctx, "/foo") assert.ErrorAs(t, err, &filer.DirectoryNotEmptyError{}) - assert.True(t, errors.Is(err, fs.ErrInvalid)) + assert.ErrorIs(t, err, fs.ErrInvalid) // Delete should succeed for a non-empty directory if the DeleteRecursively flag is set. err = f.Delete(ctx, "/foo", filer.DeleteRecursively) @@ -223,16 +222,16 @@ func commonFilerReadWriteTests(t *testing.T, ctx context.Context, f filer.Filer) // Delete of the filer root should ALWAYS fail, otherwise subsequent writes would fail. // It is not in the filer's purview to delete its root directory. 
err = f.Delete(ctx, "/") - assert.True(t, errors.As(err, &filer.CannotDeleteRootError{})) - assert.True(t, errors.Is(err, fs.ErrInvalid)) + assert.ErrorAs(t, err, &filer.CannotDeleteRootError{}) + assert.ErrorIs(t, err, fs.ErrInvalid) } -func TestAccFilerReadWrite(t *testing.T) { +func TestFilerReadWrite(t *testing.T) { t.Parallel() for _, testCase := range []struct { name string - f func(t *testing.T) (filer.Filer, string) + f func(t testutil.TestingT) (filer.Filer, string) }{ {"local", setupLocalFiler}, {"workspace files", setupWsfsFiler}, @@ -261,7 +260,7 @@ func commonFilerReadDirTest(t *testing.T, ctx context.Context, f filer.Filer) { // We start with an empty directory. entries, err := f.ReadDir(ctx, ".") require.NoError(t, err) - assert.Len(t, entries, 0) + assert.Empty(t, entries) // Write a file. err = f.Write(ctx, "/hello.txt", strings.NewReader(`hello world`)) @@ -281,8 +280,8 @@ func commonFilerReadDirTest(t *testing.T, ctx context.Context, f filer.Filer) { // Expect an error if the path doesn't exist. _, err = f.ReadDir(ctx, "/dir/a/b/c/d/e") - assert.True(t, errors.As(err, &filer.NoSuchDirectoryError{}), err) - assert.True(t, errors.Is(err, fs.ErrNotExist)) + assert.ErrorAs(t, err, &filer.NoSuchDirectoryError{}, err) + assert.ErrorIs(t, err, fs.ErrNotExist) // Expect two entries in the root. entries, err = f.ReadDir(ctx, ".") @@ -294,7 +293,7 @@ func commonFilerReadDirTest(t *testing.T, ctx context.Context, f filer.Filer) { assert.False(t, entries[1].IsDir()) info, err = entries[1].Info() require.NoError(t, err) - assert.Greater(t, info.ModTime().Unix(), int64(0)) + assert.Positive(t, info.ModTime().Unix()) // Expect two entries in the directory. entries, err = f.ReadDir(ctx, "/dir") @@ -306,7 +305,7 @@ func commonFilerReadDirTest(t *testing.T, ctx context.Context, f filer.Filer) { assert.False(t, entries[1].IsDir()) info, err = entries[1].Info() require.NoError(t, err) - assert.Greater(t, info.ModTime().Unix(), int64(0)) + assert.Positive(t, info.ModTime().Unix()) // Expect a single entry in the nested path. 
entries, err = f.ReadDir(ctx, "/dir/a/b") @@ -324,7 +323,7 @@ func commonFilerReadDirTest(t *testing.T, ctx context.Context, f filer.Filer) { require.NoError(t, err) entries, err = f.ReadDir(ctx, "empty-dir") assert.NoError(t, err) - assert.Len(t, entries, 0) + assert.Empty(t, entries) // Expect one entry for a directory with a file in it err = f.Write(ctx, "dir-with-one-file/my-file.txt", strings.NewReader("abc"), filer.CreateParentDirectories) @@ -332,16 +331,16 @@ func commonFilerReadDirTest(t *testing.T, ctx context.Context, f filer.Filer) { entries, err = f.ReadDir(ctx, "dir-with-one-file") assert.NoError(t, err) assert.Len(t, entries, 1) - assert.Equal(t, entries[0].Name(), "my-file.txt") + assert.Equal(t, "my-file.txt", entries[0].Name()) assert.False(t, entries[0].IsDir()) } -func TestAccFilerReadDir(t *testing.T) { +func TestFilerReadDir(t *testing.T) { t.Parallel() for _, testCase := range []struct { name string - f func(t *testing.T) (filer.Filer, string) + f func(t testutil.TestingT) (filer.Filer, string) }{ {"local", setupLocalFiler}, {"workspace files", setupWsfsFiler}, @@ -361,7 +360,7 @@ func TestAccFilerReadDir(t *testing.T) { } } -func TestAccFilerWorkspaceNotebook(t *testing.T) { +func TestFilerWorkspaceNotebook(t *testing.T) { t.Parallel() ctx := context.Background() @@ -410,33 +409,33 @@ func TestAccFilerWorkspaceNotebook(t *testing.T) { { name: "pythonJupyterNb.ipynb", nameWithoutExt: "pythonJupyterNb", - content1: readFile(t, "testdata/notebooks/py1.ipynb"), + content1: testutil.ReadFile(t, "testdata/notebooks/py1.ipynb"), expected1: "# Databricks notebook source\nprint(1)", - content2: readFile(t, "testdata/notebooks/py2.ipynb"), + content2: testutil.ReadFile(t, "testdata/notebooks/py2.ipynb"), expected2: "# Databricks notebook source\nprint(2)", }, { name: "rJupyterNb.ipynb", nameWithoutExt: "rJupyterNb", - content1: readFile(t, "testdata/notebooks/r1.ipynb"), + content1: testutil.ReadFile(t, "testdata/notebooks/r1.ipynb"), expected1: "# Databricks notebook source\nprint(1)", - content2: readFile(t, "testdata/notebooks/r2.ipynb"), + content2: testutil.ReadFile(t, "testdata/notebooks/r2.ipynb"), expected2: "# Databricks notebook source\nprint(2)", }, { name: "scalaJupyterNb.ipynb", nameWithoutExt: "scalaJupyterNb", - content1: readFile(t, "testdata/notebooks/scala1.ipynb"), + content1: testutil.ReadFile(t, "testdata/notebooks/scala1.ipynb"), expected1: "// Databricks notebook source\nprintln(1)", - content2: readFile(t, "testdata/notebooks/scala2.ipynb"), + content2: testutil.ReadFile(t, "testdata/notebooks/scala2.ipynb"), expected2: "// Databricks notebook source\nprintln(2)", }, { name: "sqlJupyterNotebook.ipynb", nameWithoutExt: "sqlJupyterNotebook", - content1: readFile(t, "testdata/notebooks/sql1.ipynb"), + content1: testutil.ReadFile(t, "testdata/notebooks/sql1.ipynb"), expected1: "-- Databricks notebook source\nselect 1", - content2: readFile(t, "testdata/notebooks/sql2.ipynb"), + content2: testutil.ReadFile(t, "testdata/notebooks/sql2.ipynb"), expected2: "-- Databricks notebook source\nselect 2", }, } @@ -458,7 +457,7 @@ func TestAccFilerWorkspaceNotebook(t *testing.T) { // Assert uploading a second time fails due to overwrite mode missing err = f.Write(ctx, tc.name, strings.NewReader(tc.content2)) require.ErrorIs(t, err, fs.ErrExist) - assert.Regexp(t, regexp.MustCompile(`file already exists: .*/`+tc.nameWithoutExt+`$`), err.Error()) + assert.Regexp(t, `file already exists: .*/`+tc.nameWithoutExt+`$`, err.Error()) // Try uploading the notebook again with 
overwrite flag. This time it should succeed. err = f.Write(ctx, tc.name, strings.NewReader(tc.content2), filer.OverwriteIfExists) @@ -468,10 +467,9 @@ func TestAccFilerWorkspaceNotebook(t *testing.T) { filerTest{t, f}.assertContents(ctx, tc.nameWithoutExt, tc.expected2) }) } - } -func TestAccFilerWorkspaceFilesExtensionsReadDir(t *testing.T) { +func TestFilerWorkspaceFilesExtensionsReadDir(t *testing.T) { t.Parallel() files := []struct { @@ -484,13 +482,13 @@ func TestAccFilerWorkspaceFilesExtensionsReadDir(t *testing.T) { {"foo.r", "print('foo')"}, {"foo.scala", "println('foo')"}, {"foo.sql", "SELECT 'foo'"}, - {"py1.ipynb", readFile(t, "testdata/notebooks/py1.ipynb")}, + {"py1.ipynb", testutil.ReadFile(t, "testdata/notebooks/py1.ipynb")}, {"pyNb.py", "# Databricks notebook source\nprint('first upload'))"}, - {"r1.ipynb", readFile(t, "testdata/notebooks/r1.ipynb")}, + {"r1.ipynb", testutil.ReadFile(t, "testdata/notebooks/r1.ipynb")}, {"rNb.r", "# Databricks notebook source\nprint('first upload'))"}, - {"scala1.ipynb", readFile(t, "testdata/notebooks/scala1.ipynb")}, + {"scala1.ipynb", testutil.ReadFile(t, "testdata/notebooks/scala1.ipynb")}, {"scalaNb.scala", "// Databricks notebook source\n println(\"first upload\"))"}, - {"sql1.ipynb", readFile(t, "testdata/notebooks/sql1.ipynb")}, + {"sql1.ipynb", testutil.ReadFile(t, "testdata/notebooks/sql1.ipynb")}, {"sqlNb.sql", "-- Databricks notebook source\n SELECT \"first upload\""}, } @@ -555,10 +553,10 @@ func setupFilerWithExtensionsTest(t *testing.T) filer.Filer { }{ {"foo.py", "# Databricks notebook source\nprint('first upload'))"}, {"bar.py", "print('foo')"}, - {"p1.ipynb", readFile(t, "testdata/notebooks/py1.ipynb")}, - {"r1.ipynb", readFile(t, "testdata/notebooks/r1.ipynb")}, - {"scala1.ipynb", readFile(t, "testdata/notebooks/scala1.ipynb")}, - {"sql1.ipynb", readFile(t, "testdata/notebooks/sql1.ipynb")}, + {"p1.ipynb", testutil.ReadFile(t, "testdata/notebooks/py1.ipynb")}, + {"r1.ipynb", testutil.ReadFile(t, "testdata/notebooks/r1.ipynb")}, + {"scala1.ipynb", testutil.ReadFile(t, "testdata/notebooks/scala1.ipynb")}, + {"sql1.ipynb", testutil.ReadFile(t, "testdata/notebooks/sql1.ipynb")}, {"pretender", "not a notebook"}, {"dir/file.txt", "file content"}, {"scala-notebook.scala", "// Databricks notebook source\nprintln('first upload')"}, @@ -575,7 +573,7 @@ func setupFilerWithExtensionsTest(t *testing.T) filer.Filer { return wf } -func TestAccFilerWorkspaceFilesExtensionsRead(t *testing.T) { +func TestFilerWorkspaceFilesExtensionsRead(t *testing.T) { t.Parallel() ctx := context.Background() @@ -612,7 +610,7 @@ func TestAccFilerWorkspaceFilesExtensionsRead(t *testing.T) { assert.ErrorIs(t, err, fs.ErrNotExist) } -func TestAccFilerWorkspaceFilesExtensionsDelete(t *testing.T) { +func TestFilerWorkspaceFilesExtensionsDelete(t *testing.T) { t.Parallel() ctx := context.Background() @@ -661,7 +659,7 @@ func TestAccFilerWorkspaceFilesExtensionsDelete(t *testing.T) { filerTest{t, wf}.assertNotExists(ctx, "dir") } -func TestAccFilerWorkspaceFilesExtensionsStat(t *testing.T) { +func TestFilerWorkspaceFilesExtensionsStat(t *testing.T) { t.Parallel() ctx := context.Background() @@ -708,7 +706,7 @@ func TestAccFilerWorkspaceFilesExtensionsStat(t *testing.T) { } } -func TestAccWorkspaceFilesExtensionsDirectoriesAreNotNotebooks(t *testing.T) { +func TestWorkspaceFilesExtensionsDirectoriesAreNotNotebooks(t *testing.T) { t.Parallel() ctx := context.Background() @@ -723,14 +721,14 @@ func TestAccWorkspaceFilesExtensionsDirectoriesAreNotNotebooks(t 
*testing.T) { assert.ErrorIs(t, err, fs.ErrNotExist) } -func TestAccWorkspaceFilesExtensionsNotebooksAreNotReadAsFiles(t *testing.T) { +func TestWorkspaceFilesExtensionsNotebooksAreNotReadAsFiles(t *testing.T) { t.Parallel() ctx := context.Background() wf, _ := setupWsfsExtensionsFiler(t) // Create a notebook - err := wf.Write(ctx, "foo.ipynb", strings.NewReader(readFile(t, "testdata/notebooks/py1.ipynb"))) + err := wf.Write(ctx, "foo.ipynb", strings.NewReader(testutil.ReadFile(t, "testdata/notebooks/py1.ipynb"))) require.NoError(t, err) // Reading foo should fail. Even though the WSFS name for the notebook is foo @@ -742,14 +740,14 @@ func TestAccWorkspaceFilesExtensionsNotebooksAreNotReadAsFiles(t *testing.T) { assert.NoError(t, err) } -func TestAccWorkspaceFilesExtensionsNotebooksAreNotStatAsFiles(t *testing.T) { +func TestWorkspaceFilesExtensionsNotebooksAreNotStatAsFiles(t *testing.T) { t.Parallel() ctx := context.Background() wf, _ := setupWsfsExtensionsFiler(t) // Create a notebook - err := wf.Write(ctx, "foo.ipynb", strings.NewReader(readFile(t, "testdata/notebooks/py1.ipynb"))) + err := wf.Write(ctx, "foo.ipynb", strings.NewReader(testutil.ReadFile(t, "testdata/notebooks/py1.ipynb"))) require.NoError(t, err) // Stating foo should fail. Even though the WSFS name for the notebook is foo @@ -761,14 +759,14 @@ func TestAccWorkspaceFilesExtensionsNotebooksAreNotStatAsFiles(t *testing.T) { assert.NoError(t, err) } -func TestAccWorkspaceFilesExtensionsNotebooksAreNotDeletedAsFiles(t *testing.T) { +func TestWorkspaceFilesExtensionsNotebooksAreNotDeletedAsFiles(t *testing.T) { t.Parallel() ctx := context.Background() wf, _ := setupWsfsExtensionsFiler(t) // Create a notebook - err := wf.Write(ctx, "foo.ipynb", strings.NewReader(readFile(t, "testdata/notebooks/py1.ipynb"))) + err := wf.Write(ctx, "foo.ipynb", strings.NewReader(testutil.ReadFile(t, "testdata/notebooks/py1.ipynb"))) require.NoError(t, err) // Deleting foo should fail. Even though the WSFS name for the notebook is foo @@ -780,7 +778,7 @@ func TestAccWorkspaceFilesExtensionsNotebooksAreNotDeletedAsFiles(t *testing.T) assert.NoError(t, err) } -func TestAccWorkspaceFilesExtensions_ExportFormatIsPreserved(t *testing.T) { +func TestWorkspaceFilesExtensions_ExportFormatIsPreserved(t *testing.T) { t.Parallel() // Case 1: Writing source notebooks. 
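The extension-filer tests above and in the following hunk all encode the same addressing rule: a notebook is only visible through this filer under its extension-qualified name, never under the bare workspace object name. A compact sketch of that rule, reusing setupWsfsExtensionsFiler and testutil.ReadFile from this diff; the test name and object name are illustrative only.

package filer_test

import (
	"context"
	"io/fs"
	"strings"
	"testing"

	"github.com/databricks/cli/internal/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestExtensionsAddressingSketch(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	wf, _ := setupWsfsExtensionsFiler(t)

	// A Jupyter notebook written as foo.ipynb remains addressable as foo.ipynb...
	err := wf.Write(ctx, "foo.ipynb", strings.NewReader(testutil.ReadFile(t, "testdata/notebooks/py1.ipynb")))
	require.NoError(t, err)
	_, err = wf.Stat(ctx, "foo.ipynb")
	require.NoError(t, err)

	// ...while the underlying workspace object name "foo" stays hidden.
	_, err = wf.Stat(ctx, "foo")
	assert.ErrorIs(t, err, fs.ErrNotExist)
}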
@@ -850,25 +848,25 @@ func TestAccWorkspaceFilesExtensions_ExportFormatIsPreserved(t *testing.T) { language: "python", sourceName: "foo.py", jupyterName: "foo.ipynb", - jupyterContent: readFile(t, "testdata/notebooks/py1.ipynb"), + jupyterContent: testutil.ReadFile(t, "testdata/notebooks/py1.ipynb"), }, { language: "r", sourceName: "foo.r", jupyterName: "foo.ipynb", - jupyterContent: readFile(t, "testdata/notebooks/r1.ipynb"), + jupyterContent: testutil.ReadFile(t, "testdata/notebooks/r1.ipynb"), }, { language: "scala", sourceName: "foo.scala", jupyterName: "foo.ipynb", - jupyterContent: readFile(t, "testdata/notebooks/scala1.ipynb"), + jupyterContent: testutil.ReadFile(t, "testdata/notebooks/scala1.ipynb"), }, { language: "sql", sourceName: "foo.sql", jupyterName: "foo.ipynb", - jupyterContent: readFile(t, "testdata/notebooks/sql1.ipynb"), + jupyterContent: testutil.ReadFile(t, "testdata/notebooks/sql1.ipynb"), }, } { t.Run("jupyter_"+tc.language, func(t *testing.T) { diff --git a/integration/libs/filer/helpers_test.go b/integration/libs/filer/helpers_test.go new file mode 100644 index 000000000..a3a3aaae5 --- /dev/null +++ b/integration/libs/filer/helpers_test.go @@ -0,0 +1,73 @@ +package filer_test + +import ( + "errors" + "net/http" + "os" + "path" + "path/filepath" + + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" + + "github.com/databricks/cli/libs/filer" + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/stretchr/testify/require" +) + +func setupLocalFiler(t testutil.TestingT) (filer.Filer, string) { + tmp := t.TempDir() + f, err := filer.NewLocalClient(tmp) + require.NoError(t, err) + + return f, path.Join(filepath.ToSlash(tmp)) +} + +func setupWsfsFiler(t testutil.TestingT) (filer.Filer, string) { + ctx, wt := acc.WorkspaceTest(t) + + tmpdir := acc.TemporaryWorkspaceDir(wt) + f, err := filer.NewWorkspaceFilesClient(wt.W, tmpdir) + require.NoError(t, err) + + // Check if we can use this API here, skip test if we cannot. 
+ _, err = f.Read(ctx, "we_use_this_call_to_test_if_this_api_is_enabled") + var aerr *apierr.APIError + if errors.As(err, &aerr) && aerr.StatusCode == http.StatusBadRequest { + t.Skip(aerr.Message) + } + + return f, tmpdir +} + +func setupWsfsExtensionsFiler(t testutil.TestingT) (filer.Filer, string) { + _, wt := acc.WorkspaceTest(t) + + tmpdir := acc.TemporaryWorkspaceDir(wt) + f, err := filer.NewWorkspaceFilesExtensionsClient(wt.W, tmpdir) + require.NoError(t, err) + return f, tmpdir +} + +func setupDbfsFiler(t testutil.TestingT) (filer.Filer, string) { + _, wt := acc.WorkspaceTest(t) + + tmpdir := acc.TemporaryDbfsDir(wt) + f, err := filer.NewDbfsClient(wt.W, tmpdir) + require.NoError(t, err) + return f, path.Join("dbfs:/", tmpdir) +} + +func setupUcVolumesFiler(t testutil.TestingT) (filer.Filer, string) { + _, wt := acc.WorkspaceTest(t) + + if os.Getenv("TEST_METASTORE_ID") == "" { + t.Skip("Skipping tests that require a UC Volume when metastore id is not set.") + } + + tmpdir := acc.TemporaryVolume(wt) + f, err := filer.NewFilesClient(wt.W, tmpdir) + require.NoError(t, err) + + return f, path.Join("dbfs:/", tmpdir) +} diff --git a/internal/testdata/notebooks/py1.ipynb b/integration/libs/filer/testdata/notebooks/py1.ipynb similarity index 100% rename from internal/testdata/notebooks/py1.ipynb rename to integration/libs/filer/testdata/notebooks/py1.ipynb diff --git a/internal/testdata/notebooks/py2.ipynb b/integration/libs/filer/testdata/notebooks/py2.ipynb similarity index 100% rename from internal/testdata/notebooks/py2.ipynb rename to integration/libs/filer/testdata/notebooks/py2.ipynb diff --git a/internal/testdata/notebooks/r1.ipynb b/integration/libs/filer/testdata/notebooks/r1.ipynb similarity index 100% rename from internal/testdata/notebooks/r1.ipynb rename to integration/libs/filer/testdata/notebooks/r1.ipynb diff --git a/internal/testdata/notebooks/r2.ipynb b/integration/libs/filer/testdata/notebooks/r2.ipynb similarity index 100% rename from internal/testdata/notebooks/r2.ipynb rename to integration/libs/filer/testdata/notebooks/r2.ipynb diff --git a/internal/testdata/notebooks/scala1.ipynb b/integration/libs/filer/testdata/notebooks/scala1.ipynb similarity index 100% rename from internal/testdata/notebooks/scala1.ipynb rename to integration/libs/filer/testdata/notebooks/scala1.ipynb diff --git a/internal/testdata/notebooks/scala2.ipynb b/integration/libs/filer/testdata/notebooks/scala2.ipynb similarity index 100% rename from internal/testdata/notebooks/scala2.ipynb rename to integration/libs/filer/testdata/notebooks/scala2.ipynb diff --git a/internal/testdata/notebooks/sql1.ipynb b/integration/libs/filer/testdata/notebooks/sql1.ipynb similarity index 100% rename from internal/testdata/notebooks/sql1.ipynb rename to integration/libs/filer/testdata/notebooks/sql1.ipynb diff --git a/internal/testdata/notebooks/sql2.ipynb b/integration/libs/filer/testdata/notebooks/sql2.ipynb similarity index 100% rename from internal/testdata/notebooks/sql2.ipynb rename to integration/libs/filer/testdata/notebooks/sql2.ipynb diff --git a/internal/git_clone_test.go b/integration/libs/git/git_clone_test.go similarity index 83% rename from internal/git_clone_test.go rename to integration/libs/git/git_clone_test.go index 73c3db105..cbc2d091d 100644 --- a/internal/git_clone_test.go +++ b/integration/libs/git/git_clone_test.go @@ -1,4 +1,4 @@ -package internal +package git_test import ( "context" @@ -10,9 +10,7 @@ import ( "github.com/stretchr/testify/assert" ) -func TestAccGitClone(t *testing.T) { 
- t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - +func TestGitClone(t *testing.T) { tmpDir := t.TempDir() ctx := context.Background() var err error @@ -32,9 +30,7 @@ func TestAccGitClone(t *testing.T) { assert.Contains(t, string(b), "ide") } -func TestAccGitCloneOnNonDefaultBranch(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - +func TestGitCloneOnNonDefaultBranch(t *testing.T) { tmpDir := t.TempDir() ctx := context.Background() var err error @@ -53,9 +49,7 @@ func TestAccGitCloneOnNonDefaultBranch(t *testing.T) { assert.Contains(t, string(b), "dais-2022") } -func TestAccGitCloneErrorsWhenRepositoryDoesNotExist(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - +func TestGitCloneErrorsWhenRepositoryDoesNotExist(t *testing.T) { tmpDir := t.TempDir() err := git.Clone(context.Background(), "https://github.com/monalisa/doesnot-exist.git", "", tmpDir) diff --git a/internal/git_fetch_test.go b/integration/libs/git/git_fetch_test.go similarity index 68% rename from internal/git_fetch_test.go rename to integration/libs/git/git_fetch_test.go index 5dab6be76..0998d775b 100644 --- a/internal/git_fetch_test.go +++ b/integration/libs/git/git_fetch_test.go @@ -1,21 +1,24 @@ -package internal +package git_test import ( "os" "os/exec" "path" "path/filepath" + "strings" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/libs/dbr" "github.com/databricks/cli/libs/git" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -const examplesRepoUrl = "https://github.com/databricks/bundle-examples" -const examplesRepoProvider = "gitHub" +const ( + examplesRepoUrl = "https://github.com/databricks/bundle-examples" + examplesRepoProvider = "gitHub" +) func assertFullGitInfo(t *testing.T, expectedRoot string, info git.RepositoryInfo) { assert.Equal(t, "main", info.CurrentBranch) @@ -35,19 +38,18 @@ func assertSparseGitInfo(t *testing.T, expectedRoot string, info git.RepositoryI assert.Equal(t, expectedRoot, info.WorktreeRoot) } -func TestAccFetchRepositoryInfoAPI_FromRepo(t *testing.T) { +func ensureWorkspacePrefix(root string) string { + // The fixture helper doesn't include /Workspace, so include it here. + if !strings.HasPrefix(root, "/Workspace/") { + return path.Join("/Workspace", root) + } + return root +} + +func TestFetchRepositoryInfoAPI_FromRepo(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - me, err := wt.W.CurrentUser.Me(ctx) - require.NoError(t, err) + targetPath := ensureWorkspacePrefix(acc.TemporaryRepo(wt, examplesRepoUrl)) - targetPath := acc.RandomName(path.Join("/Workspace/Users", me.UserName, "/testing-clone-bundle-examples-")) - stdout, stderr := RequireSuccessfulRun(t, "repos", "create", examplesRepoUrl, examplesRepoProvider, "--path", targetPath) - t.Cleanup(func() { - RequireSuccessfulRun(t, "repos", "delete", targetPath) - }) - - assert.Empty(t, stderr.String()) - assert.NotEmpty(t, stdout.String()) ctx = dbr.MockRuntime(ctx, true) for _, inputPath := range []string{ @@ -62,18 +64,14 @@ func TestAccFetchRepositoryInfoAPI_FromRepo(t *testing.T) { } } -func TestAccFetchRepositoryInfoAPI_FromNonRepo(t *testing.T) { +func TestFetchRepositoryInfoAPI_FromNonRepo(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - me, err := wt.W.CurrentUser.Me(ctx) + rootPath := ensureWorkspacePrefix(acc.TemporaryWorkspaceDir(wt, "testing-nonrepo-")) + + // Create directory inside this root path (this is cleaned up as part of the root path). 
+ err := wt.W.Workspace.MkdirsByPath(ctx, path.Join(rootPath, "a/b/c")) require.NoError(t, err) - rootPath := acc.RandomName(path.Join("/Workspace/Users", me.UserName, "testing-nonrepo-")) - _, stderr := RequireSuccessfulRun(t, "workspace", "mkdirs", path.Join(rootPath, "a/b/c")) - t.Cleanup(func() { - RequireSuccessfulRun(t, "workspace", "delete", "--recursive", rootPath) - }) - - assert.Empty(t, stderr.String()) ctx = dbr.MockRuntime(ctx, true) tests := []struct { @@ -101,14 +99,14 @@ func TestAccFetchRepositoryInfoAPI_FromNonRepo(t *testing.T) { assert.NoError(t, err) } else { assert.Error(t, err) - assert.Contains(t, err.Error(), test.msg) + assert.ErrorContains(t, err, test.msg) } assertEmptyGitInfo(t, info) }) } } -func TestAccFetchRepositoryInfoDotGit_FromGitRepo(t *testing.T) { +func TestFetchRepositoryInfoDotGit_FromGitRepo(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) repo := cloneRepoLocally(t, examplesRepoUrl) @@ -135,12 +133,12 @@ func cloneRepoLocally(t *testing.T, repoUrl string) string { return localRoot } -func TestAccFetchRepositoryInfoDotGit_FromNonGitRepo(t *testing.T) { +func TestFetchRepositoryInfoDotGit_FromNonGitRepo(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) tempDir := t.TempDir() root := filepath.Join(tempDir, "repo") - require.NoError(t, os.MkdirAll(filepath.Join(root, "a/b/c"), 0700)) + require.NoError(t, os.MkdirAll(filepath.Join(root, "a/b/c"), 0o700)) tests := []string{ filepath.Join(root, "a/b/c"), @@ -151,20 +149,20 @@ func TestAccFetchRepositoryInfoDotGit_FromNonGitRepo(t *testing.T) { for _, input := range tests { t.Run(input, func(t *testing.T) { info, err := git.FetchRepositoryInfo(ctx, input, wt.W) - assert.NoError(t, err) + assert.ErrorIs(t, err, os.ErrNotExist) assertEmptyGitInfo(t, info) }) } } -func TestAccFetchRepositoryInfoDotGit_FromBrokenGitRepo(t *testing.T) { +func TestFetchRepositoryInfoDotGit_FromBrokenGitRepo(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) tempDir := t.TempDir() root := filepath.Join(tempDir, "repo") path := filepath.Join(root, "a/b/c") - require.NoError(t, os.MkdirAll(path, 0700)) - require.NoError(t, os.WriteFile(filepath.Join(root, ".git"), []byte(""), 0000)) + require.NoError(t, os.MkdirAll(path, 0o700)) + require.NoError(t, os.WriteFile(filepath.Join(root, ".git"), []byte(""), 0o000)) info, err := git.FetchRepositoryInfo(ctx, path, wt.W) assert.NoError(t, err) diff --git a/internal/locker_test.go b/integration/libs/locker/locker_test.go similarity index 84% rename from internal/locker_test.go rename to integration/libs/locker/locker_test.go index 3ae783d1b..524996465 100644 --- a/internal/locker_test.go +++ b/integration/libs/locker/locker_test.go @@ -1,4 +1,4 @@ -package internal +package locker_test import ( "context" @@ -11,6 +11,8 @@ import ( "testing" "time" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" lockpkg "github.com/databricks/cli/libs/locker" "github.com/databricks/databricks-sdk-go" @@ -28,7 +30,7 @@ func createRemoteTestProject(t *testing.T, projectNamePrefix string, wsc *databr me, err := wsc.CurrentUser.Me(ctx) assert.NoError(t, err) - remoteProjectRoot := fmt.Sprintf("/Repos/%s/%s", me.UserName, RandomName(projectNamePrefix)) + remoteProjectRoot := fmt.Sprintf("/Repos/%s/%s", me.UserName, testutil.RandomName(projectNamePrefix)) repoInfo, err := wsc.Repos.Create(ctx, workspace.CreateRepoRequest{ Path: remoteProjectRoot, Url: EmptyRepoUrl, @@ -43,11 +45,9 @@ func createRemoteTestProject(t *testing.T, 
projectNamePrefix string, wsc *databr return remoteProjectRoot } -func TestAccLock(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - ctx := context.TODO() - wsc, err := databricks.NewWorkspaceClient() - require.NoError(t, err) +func TestLock(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + wsc := wt.W remoteProjectRoot := createRemoteTestProject(t, "lock-acc-", wsc) // 5 lockers try to acquire a lock at the same time @@ -60,13 +60,13 @@ func TestAccLock(t *testing.T) { lockerErrs := make([]error, numConcurrentLocks) lockers := make([]*lockpkg.Locker, numConcurrentLocks) - for i := 0; i < numConcurrentLocks; i++ { + for i := range numConcurrentLocks { lockers[i], err = lockpkg.CreateLocker("humpty.dumpty@databricks.com", remoteProjectRoot, wsc) require.NoError(t, err) } var wg sync.WaitGroup - for i := 0; i < numConcurrentLocks; i++ { + for i := range numConcurrentLocks { wg.Add(1) currentIndex := i go func() { @@ -80,7 +80,7 @@ func TestAccLock(t *testing.T) { countActive := 0 indexOfActiveLocker := 0 indexOfAnInactiveLocker := -1 - for i := 0; i < numConcurrentLocks; i++ { + for i := range numConcurrentLocks { if lockers[i].Active { countActive += 1 assert.NoError(t, lockerErrs[i]) @@ -102,7 +102,7 @@ func TestAccLock(t *testing.T) { assert.True(t, remoteLocker.AcquisitionTime.Equal(lockers[indexOfActiveLocker].State.AcquisitionTime), "remote locker acquisition time does not match active locker") // test all other locks (inactive ones) do not match the remote lock and Unlock fails - for i := 0; i < numConcurrentLocks; i++ { + for i := range numConcurrentLocks { if i == indexOfActiveLocker { continue } @@ -112,7 +112,7 @@ func TestAccLock(t *testing.T) { } // test inactive locks fail to write a file - for i := 0; i < numConcurrentLocks; i++ { + for i := range numConcurrentLocks { if i == indexOfActiveLocker { continue } @@ -133,13 +133,14 @@ func TestAccLock(t *testing.T) { // assert on active locker content var res map[string]string - json.Unmarshal(b, &res) + err = json.Unmarshal(b, &res) + require.NoError(t, err) assert.NoError(t, err) assert.Equal(t, "Khan", res["surname"]) assert.Equal(t, "Shah Rukh", res["name"]) // inactive locker file reads fail - for i := 0; i < numConcurrentLocks; i++ { + for i := range numConcurrentLocks { if i == indexOfActiveLocker { continue } @@ -162,14 +163,12 @@ func TestAccLock(t *testing.T) { assert.True(t, lockers[indexOfAnInactiveLocker].Active) } -func setupLockerTest(ctx context.Context, t *testing.T) (*lockpkg.Locker, filer.Filer) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) +func setupLockerTest(t *testing.T) (context.Context, *lockpkg.Locker, filer.Filer) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W // create temp wsfs dir - tmpDir := TemporaryWorkspaceDir(t, w) + tmpDir := acc.TemporaryWorkspaceDir(wt, "locker-") f, err := filer.NewWorkspaceFilesClient(w, tmpDir) require.NoError(t, err) @@ -177,12 +176,11 @@ func setupLockerTest(ctx context.Context, t *testing.T) (*lockpkg.Locker, filer. 
locker, err := lockpkg.CreateLocker("redfoo@databricks.com", tmpDir, w) require.NoError(t, err) - return locker, f + return ctx, locker, f } -func TestAccLockUnlockWithoutAllowsLockFileNotExist(t *testing.T) { - ctx := context.Background() - locker, f := setupLockerTest(ctx, t) +func TestLockUnlockWithoutAllowsLockFileNotExist(t *testing.T) { + ctx, locker, f := setupLockerTest(t) var err error // Acquire lock on tmp directory @@ -202,9 +200,8 @@ func TestAccLockUnlockWithoutAllowsLockFileNotExist(t *testing.T) { assert.ErrorIs(t, err, fs.ErrNotExist) } -func TestAccLockUnlockWithAllowsLockFileNotExist(t *testing.T) { - ctx := context.Background() - locker, f := setupLockerTest(ctx, t) +func TestLockUnlockWithAllowsLockFileNotExist(t *testing.T) { + ctx, locker, f := setupLockerTest(t) var err error // Acquire lock on tmp directory diff --git a/internal/tags_test.go b/integration/libs/tags/tags_test.go similarity index 84% rename from internal/tags_test.go rename to integration/libs/tags/tags_test.go index 2dd3759ac..8a54a966b 100644 --- a/internal/tags_test.go +++ b/integration/libs/tags/tags_test.go @@ -1,41 +1,27 @@ -package internal +package tags_test import ( - "context" "strings" "testing" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" - "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/require" ) func testTags(t *testing.T, tags map[string]string) error { - var nodeTypeId string - switch testutil.GetCloud(t) { - case testutil.AWS: - nodeTypeId = "i3.xlarge" - case testutil.Azure: - nodeTypeId = "Standard_DS4_v2" - case testutil.GCP: - nodeTypeId = "n1-standard-4" - } - - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - ctx := context.Background() - resp, err := w.Jobs.Create(ctx, jobs.CreateJob{ - Name: RandomName("test-tags-"), + ctx, wt := acc.WorkspaceTest(t) + resp, err := wt.W.Jobs.Create(ctx, jobs.CreateJob{ + Name: testutil.RandomName("test-tags-"), Tasks: []jobs.Task{ { TaskKey: "test", NewCluster: &compute.ClusterSpec{ SparkVersion: "13.3.x-scala2.12", NumWorkers: 1, - NodeTypeId: nodeTypeId, + NodeTypeId: testutil.GetCloud(t).NodeTypeID(), }, SparkPythonTask: &jobs.SparkPythonTask{ PythonFile: "/doesnt_exist.py", @@ -47,7 +33,11 @@ func testTags(t *testing.T, tags map[string]string) error { if resp != nil { t.Cleanup(func() { - w.Jobs.DeleteByJobId(ctx, resp.JobId) + _ = wt.W.Jobs.DeleteByJobId(ctx, resp.JobId) + // Cannot enable errchecking there, tests fail with: + // Error: Received unexpected error: + // Job 0 does not exist. 
+ // require.NoError(t, err) }) } @@ -90,7 +80,7 @@ func runTagTestCases(t *testing.T, cases []tagTestCase) { } } -func TestAccTagKeyAWS(t *testing.T) { +func TestTagKeyAWS(t *testing.T) { testutil.Require(t, testutil.AWS) t.Parallel() @@ -122,7 +112,7 @@ func TestAccTagKeyAWS(t *testing.T) { }) } -func TestAccTagValueAWS(t *testing.T) { +func TestTagValueAWS(t *testing.T) { testutil.Require(t, testutil.AWS) t.Parallel() @@ -148,7 +138,7 @@ func TestAccTagValueAWS(t *testing.T) { }) } -func TestAccTagKeyAzure(t *testing.T) { +func TestTagKeyAzure(t *testing.T) { testutil.Require(t, testutil.Azure) t.Parallel() @@ -180,7 +170,7 @@ func TestAccTagKeyAzure(t *testing.T) { }) } -func TestAccTagValueAzure(t *testing.T) { +func TestTagValueAzure(t *testing.T) { testutil.Require(t, testutil.Azure) t.Parallel() @@ -200,7 +190,7 @@ func TestAccTagValueAzure(t *testing.T) { }) } -func TestAccTagKeyGCP(t *testing.T) { +func TestTagKeyGCP(t *testing.T) { testutil.Require(t, testutil.GCP) t.Parallel() @@ -232,7 +222,7 @@ func TestAccTagKeyGCP(t *testing.T) { }) } -func TestAccTagValueGCP(t *testing.T) { +func TestTagValueGCP(t *testing.T) { testutil.Require(t, testutil.GCP) t.Parallel() diff --git a/internal/python/python_tasks_test.go b/integration/python/python_tasks_test.go similarity index 63% rename from internal/python/python_tasks_test.go rename to integration/python/python_tasks_test.go index fde9b37f6..39b38f890 100644 --- a/internal/python/python_tasks_test.go +++ b/integration/python/python_tasks_test.go @@ -1,11 +1,10 @@ -package python +package python_test import ( "bytes" "context" "encoding/base64" "encoding/json" - "fmt" "os" "path" "slices" @@ -14,9 +13,11 @@ import ( "time" "github.com/databricks/cli/bundle/run/output" - "github.com/databricks/cli/internal" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/stretchr/testify/require" @@ -73,10 +74,9 @@ var sparkVersions = []string{ "14.1.x-scala2.12", } -func TestAccRunPythonTaskWorkspace(t *testing.T) { +func TestRunPythonTaskWorkspace(t *testing.T) { // TODO: remove RUN_PYTHON_TASKS_TEST when ready to be executed as part of nightly - internal.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") - internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + testutil.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") unsupportedSparkVersionsForWheel := []string{ "11.3.x-scala2.12", @@ -94,10 +94,9 @@ func TestAccRunPythonTaskWorkspace(t *testing.T) { }) } -func TestAccRunPythonTaskDBFS(t *testing.T) { +func TestRunPythonTaskDBFS(t *testing.T) { // TODO: remove RUN_PYTHON_TASKS_TEST when ready to be executed as part of nightly - internal.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") - internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + testutil.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") runPythonTasks(t, prepareDBFSFiles(t), testOpts{ name: "Python tasks from DBFS", @@ -107,10 +106,9 @@ func TestAccRunPythonTaskDBFS(t *testing.T) { }) } -func TestAccRunPythonTaskRepo(t *testing.T) { +func TestRunPythonTaskRepo(t *testing.T) { // TODO: remove RUN_PYTHON_TASKS_TEST when ready to be executed as part of nightly - internal.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") - internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + testutil.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") 
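Aside on testutil.GetCloud(t).NodeTypeID(), used here and in the tags tests above: it replaces the per-cloud switch that this diff deletes, so based on that removed code the mapping is i3.xlarge on AWS, Standard_DS4_v2 on Azure, and n1-standard-4 on GCP. A small sketch of how a cluster spec is built with it; the function name is illustrative only.

package tags_test

import (
	"testing"

	"github.com/databricks/cli/internal/testutil"
	"github.com/databricks/databricks-sdk-go/service/compute"
)

// sketchClusterSpec mirrors the spec used by the tags tests, with the node
// type resolved per cloud via the helper instead of a hand-rolled switch.
func sketchClusterSpec(t *testing.T) *compute.ClusterSpec {
	return &compute.ClusterSpec{
		SparkVersion: "13.3.x-scala2.12",
		NumWorkers:   1,
		NodeTypeId:   testutil.GetCloud(t).NodeTypeID(),
	}
}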
runPythonTasks(t, prepareRepoFiles(t), testOpts{ name: "Python tasks from Repo", @@ -121,19 +119,16 @@ func TestAccRunPythonTaskRepo(t *testing.T) { } func runPythonTasks(t *testing.T, tw *testFiles, opts testOpts) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) - w := tw.w - nodeTypeId := internal.GetNodeTypeId(env) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() tasks := make([]jobs.SubmitTask, 0) if opts.includeNotebookTasks { - tasks = append(tasks, internal.GenerateNotebookTasks(tw.pyNotebookPath, sparkVersions, nodeTypeId)...) + tasks = append(tasks, GenerateNotebookTasks(tw.pyNotebookPath, sparkVersions, nodeTypeId)...) } if opts.includeSparkPythonTasks { - tasks = append(tasks, internal.GenerateSparkPythonTasks(tw.sparkPythonPath, sparkVersions, nodeTypeId)...) + tasks = append(tasks, GenerateSparkPythonTasks(tw.sparkPythonPath, sparkVersions, nodeTypeId)...) } if opts.includeWheelTasks { @@ -141,7 +136,7 @@ func runPythonTasks(t *testing.T, tw *testFiles, opts testOpts) { if len(opts.wheelSparkVersions) > 0 { versions = opts.wheelSparkVersions } - tasks = append(tasks, internal.GenerateWheelTasks(tw.wheelPath, versions, nodeTypeId)...) + tasks = append(tasks, GenerateWheelTasks(tw.wheelPath, versions, nodeTypeId)...) } ctx := context.Background() @@ -178,13 +173,13 @@ func runPythonTasks(t *testing.T, tw *testFiles, opts testOpts) { } func prepareWorkspaceFiles(t *testing.T) *testFiles { - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + var err error + ctx, wt := acc.WorkspaceTest(t) + w := wt.W + + baseDir := acc.TemporaryWorkspaceDir(wt, "python-tasks-") - baseDir := internal.TemporaryWorkspaceDir(t, w) pyNotebookPath := path.Join(baseDir, "test.py") - err = w.Workspace.Import(ctx, workspace.Import{ Path: pyNotebookPath, Overwrite: true, @@ -224,11 +219,12 @@ func prepareWorkspaceFiles(t *testing.T) *testFiles { } func prepareDBFSFiles(t *testing.T) *testFiles { - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + var err error + ctx, wt := acc.WorkspaceTest(t) + w := wt.W + + baseDir := acc.TemporaryDbfsDir(wt, "python-tasks-") - baseDir := internal.TemporaryDbfsDir(t, w) f, err := filer.NewDbfsClient(w, baseDir) require.NoError(t, err) @@ -247,21 +243,89 @@ func prepareDBFSFiles(t *testing.T) *testFiles { return &testFiles{ w: w, pyNotebookPath: path.Join(baseDir, "test.py"), - sparkPythonPath: fmt.Sprintf("dbfs:%s", path.Join(baseDir, "spark.py")), - wheelPath: fmt.Sprintf("dbfs:%s", path.Join(baseDir, "my_test_code-0.0.1-py3-none-any.whl")), + sparkPythonPath: "dbfs:" + path.Join(baseDir, "spark.py"), + wheelPath: "dbfs:" + path.Join(baseDir, "my_test_code-0.0.1-py3-none-any.whl"), } } func prepareRepoFiles(t *testing.T) *testFiles { - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + _, wt := acc.WorkspaceTest(t) + w := wt.W + + baseDir := acc.TemporaryRepo(wt, "https://github.com/databricks/cli") - repo := internal.TemporaryRepo(t, w) packagePath := "internal/python/testdata" return &testFiles{ w: w, - pyNotebookPath: path.Join(repo, packagePath, "test"), - sparkPythonPath: path.Join(repo, packagePath, "spark.py"), - wheelPath: path.Join(repo, packagePath, "my_test_code-0.0.1-py3-none-any.whl"), + pyNotebookPath: path.Join(baseDir, packagePath, "test"), + sparkPythonPath: path.Join(baseDir, packagePath, "spark.py"), + wheelPath: path.Join(baseDir, packagePath, "my_test_code-0.0.1-py3-none-any.whl"), } } + +func 
GenerateNotebookTasks(notebookPath string, versions []string, nodeTypeId string) []jobs.SubmitTask { + tasks := make([]jobs.SubmitTask, 0) + for i := range versions { + task := jobs.SubmitTask{ + TaskKey: "notebook_" + strings.ReplaceAll(versions[i], ".", "_"), + NotebookTask: &jobs.NotebookTask{ + NotebookPath: notebookPath, + }, + NewCluster: &compute.ClusterSpec{ + SparkVersion: versions[i], + NumWorkers: 1, + NodeTypeId: nodeTypeId, + DataSecurityMode: compute.DataSecurityModeUserIsolation, + }, + } + tasks = append(tasks, task) + } + + return tasks +} + +func GenerateSparkPythonTasks(notebookPath string, versions []string, nodeTypeId string) []jobs.SubmitTask { + tasks := make([]jobs.SubmitTask, 0) + for i := range versions { + task := jobs.SubmitTask{ + TaskKey: "spark_" + strings.ReplaceAll(versions[i], ".", "_"), + SparkPythonTask: &jobs.SparkPythonTask{ + PythonFile: notebookPath, + }, + NewCluster: &compute.ClusterSpec{ + SparkVersion: versions[i], + NumWorkers: 1, + NodeTypeId: nodeTypeId, + DataSecurityMode: compute.DataSecurityModeUserIsolation, + }, + } + tasks = append(tasks, task) + } + + return tasks +} + +func GenerateWheelTasks(wheelPath string, versions []string, nodeTypeId string) []jobs.SubmitTask { + tasks := make([]jobs.SubmitTask, 0) + for i := range versions { + task := jobs.SubmitTask{ + TaskKey: "whl_" + strings.ReplaceAll(versions[i], ".", "_"), + PythonWheelTask: &jobs.PythonWheelTask{ + PackageName: "my_test_code", + EntryPoint: "run", + }, + NewCluster: &compute.ClusterSpec{ + SparkVersion: versions[i], + NumWorkers: 1, + NodeTypeId: nodeTypeId, + DataSecurityMode: compute.DataSecurityModeUserIsolation, + }, + Libraries: []compute.Library{ + {Whl: wheelPath}, + }, + } + tasks = append(tasks, task) + } + + return tasks +} diff --git a/internal/python/testdata/my_test_code-0.0.1-py3-none-any.whl b/integration/python/testdata/my_test_code-0.0.1-py3-none-any.whl similarity index 100% rename from internal/python/testdata/my_test_code-0.0.1-py3-none-any.whl rename to integration/python/testdata/my_test_code-0.0.1-py3-none-any.whl diff --git a/internal/python/testdata/spark.py b/integration/python/testdata/spark.py similarity index 100% rename from internal/python/testdata/spark.py rename to integration/python/testdata/spark.py diff --git a/internal/python/testdata/test.py b/integration/python/testdata/test.py similarity index 100% rename from internal/python/testdata/test.py rename to integration/python/testdata/test.py diff --git a/internal/alerts_test.go b/internal/alerts_test.go deleted file mode 100644 index 6d7544074..000000000 --- a/internal/alerts_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package internal - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestAccAlertsCreateErrWhenNoArguments(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - _, _, err := RequireErrorRun(t, "alerts-legacy", "create") - assert.Equal(t, "please provide command input in JSON format by specifying the --json flag", err.Error()) -} diff --git a/internal/api_test.go b/internal/api_test.go deleted file mode 100644 index f3e8b7171..000000000 --- a/internal/api_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package internal - -import ( - "encoding/json" - "fmt" - "path" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - _ "github.com/databricks/cli/cmd/api" -) - -func TestAccApiGet(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - stdout, _ := RequireSuccessfulRun(t, "api", "get", 
"/api/2.0/preview/scim/v2/Me") - - // Deserialize SCIM API response. - var out map[string]any - err := json.Unmarshal(stdout.Bytes(), &out) - require.NoError(t, err) - - // Assert that the output somewhat makes sense for the SCIM API. - assert.Equal(t, true, out["active"]) - assert.NotNil(t, out["id"]) -} - -func TestAccApiPost(t *testing.T) { - env := GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) - if env == "gcp" { - t.Skip("DBFS REST API is disabled on gcp") - } - - dbfsPath := path.Join("/tmp/databricks/integration", RandomName("api-post")) - requestPath := writeFile(t, "body.json", fmt.Sprintf(`{ - "path": "%s" - }`, dbfsPath)) - - // Post to mkdir - { - RequireSuccessfulRun(t, "api", "post", "--json=@"+requestPath, "/api/2.0/dbfs/mkdirs") - } - - // Post to delete - { - RequireSuccessfulRun(t, "api", "post", "--json=@"+requestPath, "/api/2.0/dbfs/delete") - } -} diff --git a/internal/bugbash/exec.sh b/internal/bugbash/exec.sh index ac25b16ed..4a087dc66 100755 --- a/internal/bugbash/exec.sh +++ b/internal/bugbash/exec.sh @@ -31,7 +31,7 @@ function cli_snapshot_directory() { dir="${dir}_386" ;; arm64|aarch64) - dir="${dir}_arm64" + dir="${dir}_arm64_v8.0" ;; armv7l|armv8l) dir="${dir}_arm_6" diff --git a/internal/build/variables.go b/internal/build/variables.go index 197dee9c3..80c4683ab 100644 --- a/internal/build/variables.go +++ b/internal/build/variables.go @@ -1,21 +1,27 @@ package build -var buildProjectName string = "cli" -var buildVersion string = "" +var ( + buildProjectName string = "cli" + buildVersion string = "" +) -var buildBranch string = "undefined" -var buildTag string = "undefined" -var buildShortCommit string = "00000000" -var buildFullCommit string = "0000000000000000000000000000000000000000" -var buildCommitTimestamp string = "0" -var buildSummary string = "v0.0.0" +var ( + buildBranch string = "undefined" + buildTag string = "undefined" + buildShortCommit string = "00000000" + buildFullCommit string = "0000000000000000000000000000000000000000" + buildCommitTimestamp string = "0" + buildSummary string = "v0.0.0" +) -var buildMajor string = "0" -var buildMinor string = "0" -var buildPatch string = "0" -var buildPrerelease string = "" -var buildIsSnapshot string = "false" -var buildTimestamp string = "0" +var ( + buildMajor string = "0" + buildMinor string = "0" + buildPatch string = "0" + buildPrerelease string = "" + buildIsSnapshot string = "false" + buildTimestamp string = "0" +) // This function is used to set the build version for testing purposes. 
func SetBuildVersion(version string) { diff --git a/internal/bundle/basic_test.go b/internal/bundle/basic_test.go deleted file mode 100644 index c24ef0c05..000000000 --- a/internal/bundle/basic_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package bundle - -import ( - "os" - "path/filepath" - "testing" - - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" - "github.com/databricks/cli/libs/env" - "github.com/google/uuid" - "github.com/stretchr/testify/require" -) - -func TestAccBasicBundleDeployWithFailOnActiveRuns(t *testing.T) { - ctx, _ := acc.WorkspaceTest(t) - - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) - uniqueId := uuid.New().String() - root, err := initTestTemplate(t, ctx, "basic", map[string]any{ - "unique_id": uniqueId, - "node_type_id": nodeTypeId, - "spark_version": defaultSparkVersion, - }) - require.NoError(t, err) - - t.Cleanup(func() { - err = destroyBundle(t, ctx, root) - require.NoError(t, err) - }) - - // deploy empty bundle - err = deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"}) - require.NoError(t, err) - - // Remove .databricks directory to simulate a fresh deployment - err = os.RemoveAll(filepath.Join(root, ".databricks")) - require.NoError(t, err) - - // deploy empty bundle again - err = deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"}) - require.NoError(t, err) -} diff --git a/internal/bundle/deploy_to_shared_test.go b/internal/bundle/deploy_to_shared_test.go deleted file mode 100644 index 568c1fb56..000000000 --- a/internal/bundle/deploy_to_shared_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package bundle - -import ( - "fmt" - "testing" - - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" - "github.com/databricks/cli/libs/env" - "github.com/google/uuid" - "github.com/stretchr/testify/require" -) - -func TestAccDeployBasicToSharedWorkspacePath(t *testing.T) { - ctx, wt := acc.WorkspaceTest(t) - - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) - uniqueId := uuid.New().String() - - currentUser, err := wt.W.CurrentUser.Me(ctx) - require.NoError(t, err) - - bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{ - "unique_id": uniqueId, - "node_type_id": nodeTypeId, - "spark_version": defaultSparkVersion, - "root_path": fmt.Sprintf("/Shared/%s", currentUser.UserName), - }) - require.NoError(t, err) - - t.Cleanup(func() { - err = destroyBundle(wt.T, ctx, bundleRoot) - require.NoError(wt.T, err) - }) - - err = deployBundle(wt.T, ctx, bundleRoot) - require.NoError(wt.T, err) -} diff --git a/internal/clusters_test.go b/internal/clusters_test.go deleted file mode 100644 index 6daddcce3..000000000 --- a/internal/clusters_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package internal - -import ( - "fmt" - "regexp" - "testing" - - "github.com/stretchr/testify/assert" -) - -var clusterId string - -func TestAccClustersList(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - stdout, stderr := RequireSuccessfulRun(t, "clusters", "list") - outStr := stdout.String() - assert.Contains(t, outStr, "ID") - assert.Contains(t, outStr, "Name") - assert.Contains(t, outStr, "State") - assert.Equal(t, "", stderr.String()) - - idRegExp := regexp.MustCompile(`[0-9]{4}\-[0-9]{6}-[a-z0-9]{8}`) - clusterId = idRegExp.FindString(outStr) - assert.NotEmpty(t, clusterId) -} - -func TestAccClustersGet(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - stdout, stderr := RequireSuccessfulRun(t, "clusters", "get", clusterId) - outStr := 
stdout.String() - assert.Contains(t, outStr, fmt.Sprintf(`"cluster_id":"%s"`, clusterId)) - assert.Equal(t, "", stderr.String()) -} - -func TestClusterCreateErrorWhenNoArguments(t *testing.T) { - _, _, err := RequireErrorRun(t, "clusters", "create") - assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") -} diff --git a/internal/helpers.go b/internal/helpers.go deleted file mode 100644 index 596f45537..000000000 --- a/internal/helpers.go +++ /dev/null @@ -1,623 +0,0 @@ -package internal - -import ( - "bufio" - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "math/rand" - "net/http" - "os" - "path" - "path/filepath" - "reflect" - "strings" - "sync" - "testing" - "time" - - "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/internal/acc" - "github.com/databricks/cli/libs/flags" - - "github.com/databricks/cli/cmd" - _ "github.com/databricks/cli/cmd/version" - "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go" - "github.com/databricks/databricks-sdk-go/apierr" - "github.com/databricks/databricks-sdk-go/service/catalog" - "github.com/databricks/databricks-sdk-go/service/compute" - "github.com/databricks/databricks-sdk-go/service/files" - "github.com/databricks/databricks-sdk-go/service/jobs" - "github.com/databricks/databricks-sdk-go/service/workspace" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "github.com/stretchr/testify/require" - - _ "github.com/databricks/cli/cmd/workspace" -) - -const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" - -// GetEnvOrSkipTest proceeds with test only with that env variable -func GetEnvOrSkipTest(t *testing.T, name string) string { - value := os.Getenv(name) - if value == "" { - t.Skipf("Environment variable %s is missing", name) - } - return value -} - -// RandomName gives random name with optional prefix. e.g. qa.RandomName("tf-") -func RandomName(prefix ...string) string { - randLen := 12 - b := make([]byte, randLen) - for i := range b { - b[i] = charset[rand.Intn(randLen)] - } - if len(prefix) > 0 { - return fmt.Sprintf("%s%s", strings.Join(prefix, ""), b) - } - return string(b) -} - -// Helper for running the root command in the background. -// It ensures that the background goroutine terminates upon -// test completion through cancelling the command context. -type cobraTestRunner struct { - *testing.T - - args []string - stdout bytes.Buffer - stderr bytes.Buffer - stdinR *io.PipeReader - stdinW *io.PipeWriter - - ctx context.Context - - // Line-by-line output. - // Background goroutines populate these channels by reading from stdout/stderr pipes. - stdoutLines <-chan string - stderrLines <-chan string - - errch <-chan error -} - -func consumeLines(ctx context.Context, wg *sync.WaitGroup, r io.Reader) <-chan string { - ch := make(chan string, 30000) - wg.Add(1) - go func() { - defer close(ch) - defer wg.Done() - scanner := bufio.NewScanner(r) - for scanner.Scan() { - // We expect to be able to always send these lines into the channel. - // If we can't, it means the channel is full and likely there is a problem - // in either the test or the code under test. - select { - case <-ctx.Done(): - return - case ch <- scanner.Text(): - continue - default: - panic("line buffer is full") - } - } - }() - return ch -} - -func (t *cobraTestRunner) registerFlagCleanup(c *cobra.Command) { - // Find target command that will be run. 
Example: if the command run is `databricks fs cp`, - // target command corresponds to `cp` - targetCmd, _, err := c.Find(t.args) - if err != nil && strings.HasPrefix(err.Error(), "unknown command") { - // even if command is unknown, we can proceed - require.NotNil(t, targetCmd) - } else { - require.NoError(t, err) - } - - // Force initialization of default flags. - // These are initialized by cobra at execution time and would otherwise - // not be cleaned up by the cleanup function below. - targetCmd.InitDefaultHelpFlag() - targetCmd.InitDefaultVersionFlag() - - // Restore flag values to their original value on test completion. - targetCmd.Flags().VisitAll(func(f *pflag.Flag) { - v := reflect.ValueOf(f.Value) - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - // Store copy of the current flag value. - reset := reflect.New(v.Type()).Elem() - reset.Set(v) - t.Cleanup(func() { - v.Set(reset) - }) - }) -} - -// Like [cobraTestRunner.Eventually], but more specific -func (t *cobraTestRunner) WaitForTextPrinted(text string, timeout time.Duration) { - t.Eventually(func() bool { - currentStdout := t.stdout.String() - return strings.Contains(currentStdout, text) - }, timeout, 50*time.Millisecond) -} - -func (t *cobraTestRunner) WaitForOutput(text string, timeout time.Duration) { - require.Eventually(t.T, func() bool { - currentStdout := t.stdout.String() - currentErrout := t.stderr.String() - return strings.Contains(currentStdout, text) || strings.Contains(currentErrout, text) - }, timeout, 50*time.Millisecond) -} - -func (t *cobraTestRunner) WithStdin() { - reader, writer := io.Pipe() - t.stdinR = reader - t.stdinW = writer -} - -func (t *cobraTestRunner) CloseStdin() { - if t.stdinW == nil { - panic("no standard input configured") - } - t.stdinW.Close() -} - -func (t *cobraTestRunner) SendText(text string) { - if t.stdinW == nil { - panic("no standard input configured") - } - t.stdinW.Write([]byte(text + "\n")) -} - -func (t *cobraTestRunner) RunBackground() { - var stdoutR, stderrR io.Reader - var stdoutW, stderrW io.WriteCloser - stdoutR, stdoutW = io.Pipe() - stderrR, stderrW = io.Pipe() - ctx := cmdio.NewContext(t.ctx, &cmdio.Logger{ - Mode: flags.ModeAppend, - Reader: bufio.Reader{}, - Writer: stderrW, - }) - - cli := cmd.New(ctx) - cli.SetOut(stdoutW) - cli.SetErr(stderrW) - cli.SetArgs(t.args) - if t.stdinW != nil { - cli.SetIn(t.stdinR) - } - - // Register cleanup function to restore flags to their original values - // once test has been executed. This is needed because flag values reside - // in a global singleton data-structure, and thus subsequent tests might - // otherwise interfere with each other - t.registerFlagCleanup(cli) - - errch := make(chan error) - ctx, cancel := context.WithCancel(ctx) - - // Tee stdout/stderr to buffers. - stdoutR = io.TeeReader(stdoutR, &t.stdout) - stderrR = io.TeeReader(stderrR, &t.stderr) - - // Consume stdout/stderr line-by-line. - var wg sync.WaitGroup - t.stdoutLines = consumeLines(ctx, &wg, stdoutR) - t.stderrLines = consumeLines(ctx, &wg, stderrR) - - // Run command in background. - go func() { - err := root.Execute(ctx, cli) - if err != nil { - t.Logf("Error running command: %s", err) - } - - // Close pipes to signal EOF. - stdoutW.Close() - stderrW.Close() - - // Wait for the [consumeLines] routines to finish now that - // the pipes they're reading from have closed. - wg.Wait() - - if t.stdout.Len() > 0 { - // Make a copy of the buffer such that it remains "unread". 
- scanner := bufio.NewScanner(bytes.NewBuffer(t.stdout.Bytes())) - for scanner.Scan() { - t.Logf("[databricks stdout]: %s", scanner.Text()) - } - } - - if t.stderr.Len() > 0 { - // Make a copy of the buffer such that it remains "unread". - scanner := bufio.NewScanner(bytes.NewBuffer(t.stderr.Bytes())) - for scanner.Scan() { - t.Logf("[databricks stderr]: %s", scanner.Text()) - } - } - - // Reset context on command for the next test. - // These commands are globals so we have to clean up to the best of our ability after each run. - // See https://github.com/spf13/cobra/blob/a6f198b635c4b18fff81930c40d464904e55b161/command.go#L1062-L1066 - //nolint:staticcheck // cobra sets the context and doesn't clear it - cli.SetContext(nil) - - // Make caller aware of error. - errch <- err - close(errch) - }() - - // Ensure command terminates upon test completion (success or failure). - t.Cleanup(func() { - // Signal termination of command. - cancel() - // Wait for goroutine to finish. - <-errch - }) - - t.errch = errch -} - -func (t *cobraTestRunner) Run() (bytes.Buffer, bytes.Buffer, error) { - t.RunBackground() - err := <-t.errch - return t.stdout, t.stderr, err -} - -// Like [require.Eventually] but errors if the underlying command has failed. -func (c *cobraTestRunner) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...any) { - ch := make(chan bool, 1) - - timer := time.NewTimer(waitFor) - defer timer.Stop() - - ticker := time.NewTicker(tick) - defer ticker.Stop() - - // Kick off condition check immediately. - go func() { ch <- condition() }() - - for tick := ticker.C; ; { - select { - case err := <-c.errch: - require.Fail(c, "Command failed", err) - return - case <-timer.C: - require.Fail(c, "Condition never satisfied", msgAndArgs...) - return - case <-tick: - tick = nil - go func() { ch <- condition() }() - case v := <-ch: - if v { - return - } - tick = ticker.C - } - } -} - -func (t *cobraTestRunner) RunAndExpectOutput(heredoc string) { - stdout, _, err := t.Run() - require.NoError(t, err) - require.Equal(t, cmdio.Heredoc(heredoc), strings.TrimSpace(stdout.String())) -} - -func (t *cobraTestRunner) RunAndParseJSON(v any) { - stdout, _, err := t.Run() - require.NoError(t, err) - err = json.Unmarshal(stdout.Bytes(), &v) - require.NoError(t, err) -} - -func NewCobraTestRunner(t *testing.T, args ...string) *cobraTestRunner { - return &cobraTestRunner{ - T: t, - ctx: context.Background(), - args: args, - } -} - -func NewCobraTestRunnerWithContext(t *testing.T, ctx context.Context, args ...string) *cobraTestRunner { - return &cobraTestRunner{ - T: t, - ctx: ctx, - args: args, - } -} - -func RequireSuccessfulRun(t *testing.T, args ...string) (bytes.Buffer, bytes.Buffer) { - t.Logf("run args: [%s]", strings.Join(args, ", ")) - c := NewCobraTestRunner(t, args...) - stdout, stderr, err := c.Run() - require.NoError(t, err) - return stdout, stderr -} - -func RequireErrorRun(t *testing.T, args ...string) (bytes.Buffer, bytes.Buffer, error) { - c := NewCobraTestRunner(t, args...) 
- stdout, stderr, err := c.Run() - require.Error(t, err) - return stdout, stderr, err -} - -func readFile(t *testing.T, name string) string { - b, err := os.ReadFile(name) - require.NoError(t, err) - - return string(b) -} - -func writeFile(t *testing.T, name string, body string) string { - f, err := os.Create(filepath.Join(t.TempDir(), name)) - require.NoError(t, err) - _, err = f.WriteString(body) - require.NoError(t, err) - f.Close() - return f.Name() -} - -func GenerateNotebookTasks(notebookPath string, versions []string, nodeTypeId string) []jobs.SubmitTask { - tasks := make([]jobs.SubmitTask, 0) - for i := 0; i < len(versions); i++ { - task := jobs.SubmitTask{ - TaskKey: fmt.Sprintf("notebook_%s", strings.ReplaceAll(versions[i], ".", "_")), - NotebookTask: &jobs.NotebookTask{ - NotebookPath: notebookPath, - }, - NewCluster: &compute.ClusterSpec{ - SparkVersion: versions[i], - NumWorkers: 1, - NodeTypeId: nodeTypeId, - DataSecurityMode: compute.DataSecurityModeUserIsolation, - }, - } - tasks = append(tasks, task) - } - - return tasks -} - -func GenerateSparkPythonTasks(notebookPath string, versions []string, nodeTypeId string) []jobs.SubmitTask { - tasks := make([]jobs.SubmitTask, 0) - for i := 0; i < len(versions); i++ { - task := jobs.SubmitTask{ - TaskKey: fmt.Sprintf("spark_%s", strings.ReplaceAll(versions[i], ".", "_")), - SparkPythonTask: &jobs.SparkPythonTask{ - PythonFile: notebookPath, - }, - NewCluster: &compute.ClusterSpec{ - SparkVersion: versions[i], - NumWorkers: 1, - NodeTypeId: nodeTypeId, - DataSecurityMode: compute.DataSecurityModeUserIsolation, - }, - } - tasks = append(tasks, task) - } - - return tasks -} - -func GenerateWheelTasks(wheelPath string, versions []string, nodeTypeId string) []jobs.SubmitTask { - tasks := make([]jobs.SubmitTask, 0) - for i := 0; i < len(versions); i++ { - task := jobs.SubmitTask{ - TaskKey: fmt.Sprintf("whl_%s", strings.ReplaceAll(versions[i], ".", "_")), - PythonWheelTask: &jobs.PythonWheelTask{ - PackageName: "my_test_code", - EntryPoint: "run", - }, - NewCluster: &compute.ClusterSpec{ - SparkVersion: versions[i], - NumWorkers: 1, - NodeTypeId: nodeTypeId, - DataSecurityMode: compute.DataSecurityModeUserIsolation, - }, - Libraries: []compute.Library{ - {Whl: wheelPath}, - }, - } - tasks = append(tasks, task) - } - - return tasks -} - -func TemporaryWorkspaceDir(t *testing.T, w *databricks.WorkspaceClient) string { - ctx := context.Background() - me, err := w.CurrentUser.Me(ctx) - require.NoError(t, err) - - basePath := fmt.Sprintf("/Users/%s/%s", me.UserName, RandomName("integration-test-wsfs-")) - - t.Logf("Creating %s", basePath) - err = w.Workspace.MkdirsByPath(ctx, basePath) - require.NoError(t, err) - - // Remove test directory on test completion. 
- t.Cleanup(func() { - t.Logf("Removing %s", basePath) - err := w.Workspace.Delete(ctx, workspace.Delete{ - Path: basePath, - Recursive: true, - }) - if err == nil || apierr.IsMissing(err) { - return - } - t.Logf("Unable to remove temporary workspace directory %s: %#v", basePath, err) - }) - - return basePath -} - -func TemporaryDbfsDir(t *testing.T, w *databricks.WorkspaceClient) string { - ctx := context.Background() - path := fmt.Sprintf("/tmp/%s", RandomName("integration-test-dbfs-")) - - t.Logf("Creating DBFS folder:%s", path) - err := w.Dbfs.MkdirsByPath(ctx, path) - require.NoError(t, err) - - t.Cleanup(func() { - t.Logf("Removing DBFS folder:%s", path) - err := w.Dbfs.Delete(ctx, files.Delete{ - Path: path, - Recursive: true, - }) - if err == nil || apierr.IsMissing(err) { - return - } - t.Logf("unable to remove temporary dbfs directory %s: %#v", path, err) - }) - - return path -} - -// Create a new UC volume in a catalog called "main" in the workspace. -func TemporaryUcVolume(t *testing.T, w *databricks.WorkspaceClient) string { - ctx := context.Background() - - // Create a schema - schema, err := w.Schemas.Create(ctx, catalog.CreateSchema{ - CatalogName: "main", - Name: RandomName("test-schema-"), - }) - require.NoError(t, err) - t.Cleanup(func() { - w.Schemas.Delete(ctx, catalog.DeleteSchemaRequest{ - FullName: schema.FullName, - }) - }) - - // Create a volume - volume, err := w.Volumes.Create(ctx, catalog.CreateVolumeRequestContent{ - CatalogName: "main", - SchemaName: schema.Name, - Name: "my-volume", - VolumeType: catalog.VolumeTypeManaged, - }) - require.NoError(t, err) - t.Cleanup(func() { - w.Volumes.Delete(ctx, catalog.DeleteVolumeRequest{ - Name: volume.FullName, - }) - }) - - return path.Join("/Volumes", "main", schema.Name, volume.Name) - -} - -func TemporaryRepo(t *testing.T, w *databricks.WorkspaceClient) string { - ctx := context.Background() - me, err := w.CurrentUser.Me(ctx) - require.NoError(t, err) - - repoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, RandomName("integration-test-repo-")) - - t.Logf("Creating repo:%s", repoPath) - repoInfo, err := w.Repos.Create(ctx, workspace.CreateRepoRequest{ - Url: "https://github.com/databricks/cli", - Provider: "github", - Path: repoPath, - }) - require.NoError(t, err) - - t.Cleanup(func() { - t.Logf("Removing repo: %s", repoPath) - err := w.Repos.Delete(ctx, workspace.DeleteRepoRequest{ - RepoId: repoInfo.Id, - }) - if err == nil || apierr.IsMissing(err) { - return - } - t.Logf("unable to remove repo %s: %#v", repoPath, err) - }) - - return repoPath -} - -func GetNodeTypeId(env string) string { - if env == "gcp" { - return "n1-standard-4" - } else if env == "aws" || env == "ucws" { - // aws-prod-ucws has CLOUD_ENV set to "ucws" - return "i3.xlarge" - } - return "Standard_DS4_v2" -} - -func setupLocalFiler(t *testing.T) (filer.Filer, string) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - tmp := t.TempDir() - f, err := filer.NewLocalClient(tmp) - require.NoError(t, err) - - return f, path.Join(filepath.ToSlash(tmp)) -} - -func setupWsfsFiler(t *testing.T) (filer.Filer, string) { - ctx, wt := acc.WorkspaceTest(t) - - tmpdir := TemporaryWorkspaceDir(t, wt.W) - f, err := filer.NewWorkspaceFilesClient(wt.W, tmpdir) - require.NoError(t, err) - - // Check if we can use this API here, skip test if we cannot. 
- _, err = f.Read(ctx, "we_use_this_call_to_test_if_this_api_is_enabled") - var aerr *apierr.APIError - if errors.As(err, &aerr) && aerr.StatusCode == http.StatusBadRequest { - t.Skip(aerr.Message) - } - - return f, tmpdir -} - -func setupWsfsExtensionsFiler(t *testing.T) (filer.Filer, string) { - _, wt := acc.WorkspaceTest(t) - - tmpdir := TemporaryWorkspaceDir(t, wt.W) - f, err := filer.NewWorkspaceFilesExtensionsClient(wt.W, tmpdir) - require.NoError(t, err) - - return f, tmpdir -} - -func setupDbfsFiler(t *testing.T) (filer.Filer, string) { - _, wt := acc.WorkspaceTest(t) - - tmpDir := TemporaryDbfsDir(t, wt.W) - f, err := filer.NewDbfsClient(wt.W, tmpDir) - require.NoError(t, err) - - return f, path.Join("dbfs:/", tmpDir) -} - -func setupUcVolumesFiler(t *testing.T) (filer.Filer, string) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - if os.Getenv("TEST_METASTORE_ID") == "" { - t.Skip("Skipping tests that require a UC Volume when metastore id is not set.") - } - - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - tmpDir := TemporaryUcVolume(t, w) - f, err := filer.NewFilesClient(w, tmpDir) - require.NoError(t, err) - - return f, path.Join("dbfs:/", tmpDir) -} diff --git a/internal/jobs_test.go b/internal/jobs_test.go deleted file mode 100644 index 8513168c8..000000000 --- a/internal/jobs_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package internal - -import ( - "encoding/json" - "fmt" - "testing" - - "github.com/databricks/cli/internal/acc" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestAccCreateJob(t *testing.T) { - acc.WorkspaceTest(t) - env := GetEnvOrSkipTest(t, "CLOUD_ENV") - if env != "azure" { - t.Skipf("Not running test on cloud %s", env) - } - stdout, stderr := RequireSuccessfulRun(t, "jobs", "create", "--json", "@testjsons/create_job_without_workers.json", "--log-level=debug") - assert.Empty(t, stderr.String()) - var output map[string]int - err := json.Unmarshal(stdout.Bytes(), &output) - require.NoError(t, err) - RequireSuccessfulRun(t, "jobs", "delete", fmt.Sprint(output["job_id"]), "--log-level=debug") -} diff --git a/internal/storage_credentials_test.go b/internal/storage_credentials_test.go deleted file mode 100644 index 07c21861f..000000000 --- a/internal/storage_credentials_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package internal - -import ( - "testing" - - "github.com/databricks/cli/internal/acc" - "github.com/stretchr/testify/assert" -) - -func TestAccStorageCredentialsListRendersResponse(t *testing.T) { - _, _ = acc.WorkspaceTest(t) - - // Check if metastore is assigned for the workspace, otherwise test will fail - t.Log(GetEnvOrSkipTest(t, "TEST_METASTORE_ID")) - - stdout, stderr := RequireSuccessfulRun(t, "storage-credentials", "list") - assert.NotEmpty(t, stdout) - assert.Empty(t, stderr) -} diff --git a/internal/testcli/README.md b/internal/testcli/README.md new file mode 100644 index 000000000..b37ae3bc9 --- /dev/null +++ b/internal/testcli/README.md @@ -0,0 +1,7 @@ +# testcli + +This package provides a way to run the CLI from tests as if it were a separate process. +By running the CLI inline we can still set breakpoints and step through execution. + +It transitively imports pretty much this entire repository, which is why we +intentionally keep this package _separate_ from `testutil`. 
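To make the intended usage of the new `testcli` package concrete, here is a minimal sketch of an integration test that drives the CLI in-process. It relies only on the runner API introduced in `internal/testcli/runner.go` below (`NewRunner`, `Run`, `RequireSuccessfulRun`); the test name, package name, and the `version` command used here are illustrative and not part of this change.

```go
package example_test

import (
	"context"
	"testing"

	"github.com/databricks/cli/internal/testcli"
	"github.com/stretchr/testify/assert"
)

// Illustrative only: runs the CLI inline and captures its output.
// *testing.T satisfies the testutil.TestingT interface expected by the runner.
func TestVersionCommand(t *testing.T) {
	ctx := context.Background()

	// RequireSuccessfulRun executes `databricks version` in-process and
	// fails the test if the command returns an error.
	stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "version")
	assert.NotEmpty(t, stdout.String())
}
```

Because the command executes inline through `root.Execute` rather than as a separate subprocess, a debugger attached to the test steps directly into CLI code, which is the motivation stated in the README above.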
diff --git a/internal/testcli/golden.go b/internal/testcli/golden.go new file mode 100644 index 000000000..669cc2f9b --- /dev/null +++ b/internal/testcli/golden.go @@ -0,0 +1,30 @@ +package testcli + +import ( + "context" + "fmt" + + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/testdiff" + "github.com/stretchr/testify/assert" +) + +func captureOutput(t testutil.TestingT, ctx context.Context, args []string) string { + t.Helper() + r := NewRunner(t, ctx, args...) + stdout, stderr, err := r.Run() + assert.NoError(t, err) + return stderr.String() + stdout.String() +} + +func AssertOutput(t testutil.TestingT, ctx context.Context, args []string, expectedPath string) { + t.Helper() + out := captureOutput(t, ctx, args) + testdiff.AssertOutput(t, ctx, out, fmt.Sprintf("Output from %v", args), expectedPath) +} + +func AssertOutputJQ(t testutil.TestingT, ctx context.Context, args []string, expectedPath string, ignorePaths []string) { + t.Helper() + out := captureOutput(t, ctx, args) + testdiff.AssertOutputJQ(t, ctx, out, fmt.Sprintf("Output from %v", args), expectedPath, ignorePaths) +} diff --git a/internal/testcli/runner.go b/internal/testcli/runner.go new file mode 100644 index 000000000..d32fa3947 --- /dev/null +++ b/internal/testcli/runner.go @@ -0,0 +1,297 @@ +package testcli + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "io" + "strings" + "sync" + "time" + + "github.com/stretchr/testify/require" + + "github.com/databricks/cli/cmd" + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" +) + +// Helper for running the root command in the background. +// It ensures that the background goroutine terminates upon +// test completion through cancelling the command context. +type Runner struct { + testutil.TestingT + + args []string + stdout bytes.Buffer + stderr bytes.Buffer + stdinR *io.PipeReader + stdinW *io.PipeWriter + + ctx context.Context + + // Line-by-line output. + // Background goroutines populate these channels by reading from stdout/stderr pipes. + StdoutLines <-chan string + StderrLines <-chan string + + errch <-chan error +} + +func consumeLines(ctx context.Context, wg *sync.WaitGroup, r io.Reader) <-chan string { + ch := make(chan string, 30000) + wg.Add(1) + go func() { + defer close(ch) + defer wg.Done() + scanner := bufio.NewScanner(r) + for scanner.Scan() { + // We expect to be able to always send these lines into the channel. + // If we can't, it means the channel is full and likely there is a problem + // in either the test or the code under test. 
+			select {
+			case <-ctx.Done():
+				return
+			case ch <- scanner.Text():
+				continue
+			default:
+				panic("line buffer is full")
+			}
+		}
+	}()
+	return ch
+}
+
+// Like [Runner.Eventually], but more specific
+func (r *Runner) WaitForTextPrinted(text string, timeout time.Duration) {
+	r.Eventually(func() bool {
+		currentStdout := r.stdout.String()
+		return strings.Contains(currentStdout, text)
+	}, timeout, 50*time.Millisecond)
+}
+
+func (r *Runner) WaitForOutput(text string, timeout time.Duration) {
+	require.Eventually(r, func() bool {
+		currentStdout := r.stdout.String()
+		currentErrout := r.stderr.String()
+		return strings.Contains(currentStdout, text) || strings.Contains(currentErrout, text)
+	}, timeout, 50*time.Millisecond)
+}
+
+func (r *Runner) WithStdin() {
+	reader, writer := io.Pipe()
+	r.stdinR = reader
+	r.stdinW = writer
+}
+
+func (r *Runner) CloseStdin() {
+	if r.stdinW == nil {
+		panic("no standard input configured")
+	}
+	r.stdinW.Close()
+}
+
+func (r *Runner) SendText(text string) {
+	if r.stdinW == nil {
+		panic("no standard input configured")
+	}
+	_, err := r.stdinW.Write([]byte(text + "\n"))
+	if err != nil {
+		panic("Failed to write to r.stdinW")
+	}
+}
+
+func (r *Runner) RunBackground() {
+	var stdoutR, stderrR io.Reader
+	var stdoutW, stderrW io.WriteCloser
+	stdoutR, stdoutW = io.Pipe()
+	stderrR, stderrW = io.Pipe()
+	ctx := cmdio.NewContext(r.ctx, &cmdio.Logger{
+		Mode:   flags.ModeAppend,
+		Reader: bufio.Reader{},
+		Writer: stderrW,
+	})
+
+	cli := cmd.New(ctx)
+	cli.SetOut(stdoutW)
+	cli.SetErr(stderrW)
+	cli.SetArgs(r.args)
+	if r.stdinW != nil {
+		cli.SetIn(r.stdinR)
+	}
+
+	errch := make(chan error)
+	ctx, cancel := context.WithCancel(ctx)
+
+	// Tee stdout/stderr to buffers.
+	stdoutR = io.TeeReader(stdoutR, &r.stdout)
+	stderrR = io.TeeReader(stderrR, &r.stderr)
+
+	// Consume stdout/stderr line-by-line.
+	var wg sync.WaitGroup
+	r.StdoutLines = consumeLines(ctx, &wg, stdoutR)
+	r.StderrLines = consumeLines(ctx, &wg, stderrR)
+
+	// Run command in background.
+	go func() {
+		err := root.Execute(ctx, cli)
+		if err != nil {
+			r.Logf("Error running command: %s", err)
+		}
+
+		// Close pipes to signal EOF.
+		stdoutW.Close()
+		stderrW.Close()
+
+		// Wait for the [consumeLines] routines to finish now that
+		// the pipes they're reading from have closed.
+		wg.Wait()
+
+		if r.stdout.Len() > 0 {
+			// Make a copy of the buffer such that it remains "unread".
+			scanner := bufio.NewScanner(bytes.NewBuffer(r.stdout.Bytes()))
+			for scanner.Scan() {
+				r.Logf("[databricks stdout]: %s", scanner.Text())
+			}
+		}
+
+		if r.stderr.Len() > 0 {
+			// Make a copy of the buffer such that it remains "unread".
+			scanner := bufio.NewScanner(bytes.NewBuffer(r.stderr.Bytes()))
+			for scanner.Scan() {
+				r.Logf("[databricks stderr]: %s", scanner.Text())
+			}
+		}
+
+		// Make caller aware of error.
+		errch <- err
+		close(errch)
+	}()
+
+	// Ensure command terminates upon test completion (success or failure).
+	r.Cleanup(func() {
+		// Signal termination of command.
+		cancel()
+		// Wait for goroutine to finish.
+ <-errch + }) + + r.errch = errch +} + +func (r *Runner) Run() (bytes.Buffer, bytes.Buffer, error) { + r.Helper() + var stdout, stderr bytes.Buffer + ctx := cmdio.NewContext(r.ctx, &cmdio.Logger{ + Mode: flags.ModeAppend, + Reader: bufio.Reader{}, + Writer: &stderr, + }) + + cli := cmd.New(ctx) + cli.SetOut(&stdout) + cli.SetErr(&stderr) + cli.SetArgs(r.args) + + r.Logf(" args: %s", strings.Join(r.args, ", ")) + + err := root.Execute(ctx, cli) + if err != nil { + r.Logf(" error: %s", err) + } + + if stdout.Len() > 0 { + // Make a copy of the buffer such that it remains "unread". + scanner := bufio.NewScanner(bytes.NewBuffer(stdout.Bytes())) + for scanner.Scan() { + r.Logf("stdout: %s", scanner.Text()) + } + } + + if stderr.Len() > 0 { + // Make a copy of the buffer such that it remains "unread". + scanner := bufio.NewScanner(bytes.NewBuffer(stderr.Bytes())) + for scanner.Scan() { + r.Logf("stderr: %s", scanner.Text()) + } + } + + return stdout, stderr, err +} + +// Like [require.Eventually] but errors if the underlying command has failed. +func (r *Runner) Eventually(condition func() bool, waitFor, tick time.Duration, msgAndArgs ...any) { + r.Helper() + ch := make(chan bool, 1) + + timer := time.NewTimer(waitFor) + defer timer.Stop() + + ticker := time.NewTicker(tick) + defer ticker.Stop() + + // Kick off condition check immediately. + go func() { ch <- condition() }() + + for tick := ticker.C; ; { + select { + case err := <-r.errch: + require.Fail(r, "Command failed", err) + return + case <-timer.C: + require.Fail(r, "Condition never satisfied", msgAndArgs...) + return + case <-tick: + tick = nil + go func() { ch <- condition() }() + case v := <-ch: + if v { + return + } + tick = ticker.C + } + } +} + +func (r *Runner) RunAndExpectOutput(heredoc string) { + r.Helper() + stdout, _, err := r.Run() + require.NoError(r, err) + require.Equal(r, cmdio.Heredoc(heredoc), strings.TrimSpace(stdout.String())) +} + +func (r *Runner) RunAndParseJSON(v any) { + r.Helper() + stdout, _, err := r.Run() + require.NoError(r, err) + err = json.Unmarshal(stdout.Bytes(), &v) + require.NoError(r, err) +} + +func NewRunner(t testutil.TestingT, ctx context.Context, args ...string) *Runner { + return &Runner{ + TestingT: t, + + ctx: ctx, + args: args, + } +} + +func RequireSuccessfulRun(t testutil.TestingT, ctx context.Context, args ...string) (bytes.Buffer, bytes.Buffer) { + t.Helper() + r := NewRunner(t, ctx, args...) + stdout, stderr, err := r.Run() + require.NoError(t, err) + return stdout, stderr +} + +func RequireErrorRun(t testutil.TestingT, ctx context.Context, args ...string) (bytes.Buffer, bytes.Buffer, error) { + t.Helper() + r := NewRunner(t, ctx, args...) + stdout, stderr, err := r.Run() + require.Error(t, err) + return stdout, stderr, err +} diff --git a/internal/testutil/cloud.go b/internal/testutil/cloud.go index ba5b75ecf..33921db0c 100644 --- a/internal/testutil/cloud.go +++ b/internal/testutil/cloud.go @@ -1,9 +1,5 @@ package testutil -import ( - "testing" -) - type Cloud int const ( @@ -13,7 +9,7 @@ const ( ) // Implement [Requirement]. 
-func (c Cloud) Verify(t *testing.T) {
+func (c Cloud) Verify(t TestingT) {
 	if c != GetCloud(t) {
 		t.Skipf("Skipping %s-specific test", c)
 	}
@@ -32,7 +28,20 @@ func (c Cloud) String() string {
 	}
 }
 
-func GetCloud(t *testing.T) Cloud {
+func (c Cloud) NodeTypeID() string {
+	switch c {
+	case AWS:
+		return "i3.xlarge"
+	case Azure:
+		return "Standard_DS4_v2"
+	case GCP:
+		return "n1-standard-4"
+	default:
+		return "unknown"
+	}
+}
+
+func GetCloud(t TestingT) Cloud {
 	env := GetEnvOrSkipTest(t, "CLOUD_ENV")
 	switch env {
 	case "aws":
@@ -49,7 +58,3 @@
 	}
 	return -1
 }
-
-func IsAWSCloud(t *testing.T) bool {
-	return GetCloud(t) == AWS
-}
diff --git a/internal/testutil/copy.go b/internal/testutil/copy.go
index 21faece00..a521da3e3 100644
--- a/internal/testutil/copy.go
+++ b/internal/testutil/copy.go
@@ -5,14 +5,13 @@ import (
 	"io/fs"
 	"os"
 	"path/filepath"
-	"testing"
 
 	"github.com/stretchr/testify/require"
 )
 
 // CopyDirectory copies the contents of a directory to another directory.
 // The destination directory is created if it does not exist.
-func CopyDirectory(t *testing.T, src, dst string) {
+func CopyDirectory(t TestingT, src, dst string) {
 	err := filepath.WalkDir(src, func(path string, d fs.DirEntry, err error) error {
 		if err != nil {
 			return err
@@ -22,7 +21,7 @@ func CopyDirectory(t *testing.T, src, dst string) {
 		require.NoError(t, err)
 
 		if d.IsDir() {
-			return os.MkdirAll(filepath.Join(dst, rel), 0755)
+			return os.MkdirAll(filepath.Join(dst, rel), 0o755)
 		}
 
 		// Copy the file to the temporary directory
diff --git a/internal/testutil/env.go b/internal/testutil/env.go
index e1973ba82..598229655 100644
--- a/internal/testutil/env.go
+++ b/internal/testutil/env.go
@@ -5,7 +5,6 @@ import (
 	"path/filepath"
 	"runtime"
 	"strings"
-	"testing"
 
 	"github.com/stretchr/testify/require"
 )
@@ -13,7 +12,7 @@ import (
 // CleanupEnvironment sets up a pristine environment containing only $PATH and $HOME.
 // The original environment is restored upon test completion.
 // Note: use of this function is incompatible with parallel execution.
-func CleanupEnvironment(t *testing.T) {
+func CleanupEnvironment(t TestingT) {
 	// Restore environment when test finishes.
 	environ := os.Environ()
 	t.Cleanup(func() {
@@ -39,20 +38,18 @@
 	}
 }
 
-// GetEnvOrSkipTest proceeds with test only with that env variable
-func GetEnvOrSkipTest(t *testing.T, name string) string {
-	value := os.Getenv(name)
-	if value == "" {
-		t.Skipf("Environment variable %s is missing", name)
-	}
-	return value
-}
-
 // Changes into specified directory for the duration of the test.
 // Returns the current working directory.
-func Chdir(t *testing.T, dir string) string {
+func Chdir(t TestingT, dir string) string {
+	// Prevent parallel execution when changing the working directory.
+	// t.Setenv automatically fails if t.Parallel is set.
+	t.Setenv("DO_NOT_RUN_IN_PARALLEL", "true")
+
 	wd, err := os.Getwd()
 	require.NoError(t, err)
+	if os.Getenv("TESTS_ORIG_WD") == "" {
+		t.Setenv("TESTS_ORIG_WD", wd)
+	}
 
 	abs, err := filepath.Abs(dir)
 	require.NoError(t, err)
@@ -67,3 +64,10 @@
 
 	return wd
 }
+
+// Return filename if testutil.Chdir was not called.
+// Return absolute path to filename if testutil.Chdir() was called.
+func TestData(filename string) string { + // Note, if TESTS_ORIG_WD is not set, Getenv return "" and Join returns filename + return filepath.Join(os.Getenv("TESTS_ORIG_WD"), filename) +} diff --git a/internal/testutil/file.go b/internal/testutil/file.go index ba2c3280e..476c4123a 100644 --- a/internal/testutil/file.go +++ b/internal/testutil/file.go @@ -3,24 +3,24 @@ package testutil import ( "os" "path/filepath" - "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TouchNotebook(t *testing.T, elems ...string) string { +func TouchNotebook(t TestingT, elems ...string) string { path := filepath.Join(elems...) - err := os.MkdirAll(filepath.Dir(path), 0755) + err := os.MkdirAll(filepath.Dir(path), 0o755) require.NoError(t, err) - err = os.WriteFile(path, []byte("# Databricks notebook source"), 0644) + err = os.WriteFile(path, []byte("# Databricks notebook source"), 0o644) require.NoError(t, err) return path } -func Touch(t *testing.T, elems ...string) string { +func Touch(t TestingT, elems ...string) string { path := filepath.Join(elems...) - err := os.MkdirAll(filepath.Dir(path), 0755) + err := os.MkdirAll(filepath.Dir(path), 0o755) require.NoError(t, err) f, err := os.Create(path) @@ -31,9 +31,9 @@ func Touch(t *testing.T, elems ...string) string { return path } -func WriteFile(t *testing.T, content string, elems ...string) string { - path := filepath.Join(elems...) - err := os.MkdirAll(filepath.Dir(path), 0755) +// WriteFile writes content to a file. +func WriteFile(t TestingT, path, content string) { + err := os.MkdirAll(filepath.Dir(path), 0o755) require.NoError(t, err) f, err := os.Create(path) @@ -44,5 +44,40 @@ func WriteFile(t *testing.T, content string, elems ...string) string { err = f.Close() require.NoError(t, err) - return path +} + +// ReadFile reads a file and returns its content as a string. +func ReadFile(t TestingT, path string) string { + b, err := os.ReadFile(path) + require.NoError(t, err) + + return string(b) +} + +// StatFile returns the file info for a file. +func StatFile(t TestingT, path string) os.FileInfo { + fi, err := os.Stat(path) + require.NoError(t, err) + + return fi +} + +// AssertFileContents asserts that the file at path has the expected content. +func AssertFileContents(t TestingT, path, expected string) bool { + actual := ReadFile(t, path) + return assert.Equal(t, expected, actual) +} + +// AssertFilePermissions asserts that the file at path has the expected permissions. +func AssertFilePermissions(t TestingT, path string, expected os.FileMode) bool { + fi := StatFile(t, path) + assert.False(t, fi.Mode().IsDir(), "expected a file, got a directory") + return assert.Equal(t, expected, fi.Mode().Perm(), "expected 0%o, got 0%o", expected, fi.Mode().Perm()) +} + +// AssertDirPermissions asserts that the file at path has the expected permissions. 
+func AssertDirPermissions(t TestingT, path string, expected os.FileMode) bool { + fi := StatFile(t, path) + assert.True(t, fi.Mode().IsDir(), "expected a directory, got a file") + return assert.Equal(t, expected, fi.Mode().Perm(), "expected 0%o, got 0%o", expected, fi.Mode().Perm()) } diff --git a/internal/acc/helpers.go b/internal/testutil/helpers.go similarity index 59% rename from internal/acc/helpers.go rename to internal/testutil/helpers.go index f98001346..44c2c9375 100644 --- a/internal/acc/helpers.go +++ b/internal/testutil/helpers.go @@ -1,15 +1,17 @@ -package acc +package testutil import ( "fmt" "math/rand" "os" "strings" - "testing" + "time" + + "github.com/stretchr/testify/require" ) // GetEnvOrSkipTest proceeds with test only with that env variable. -func GetEnvOrSkipTest(t *testing.T, name string) string { +func GetEnvOrSkipTest(t TestingT, name string) string { value := os.Getenv(name) if value == "" { t.Skipf("Environment variable %s is missing", name) @@ -24,10 +26,19 @@ func RandomName(prefix ...string) string { randLen := 12 b := make([]byte, randLen) for i := range b { - b[i] = charset[rand.Intn(randLen)] + b[i] = charset[rand.Intn(len(charset))] } if len(prefix) > 0 { return fmt.Sprintf("%s%s", strings.Join(prefix, ""), b) } return string(b) } + +func SkipUntil(t TestingT, date string) { + deadline, err := time.Parse(time.DateOnly, date) + require.NoError(t, err) + + if time.Now().Before(deadline) { + t.Skipf("Skipping test until %s. Time right now: %s", deadline.Format(time.DateOnly), time.Now()) + } +} diff --git a/internal/testutil/interface.go b/internal/testutil/interface.go new file mode 100644 index 000000000..97441212d --- /dev/null +++ b/internal/testutil/interface.go @@ -0,0 +1,29 @@ +package testutil + +// TestingT is an interface wrapper around *testing.T that provides the methods +// that are used by the test package to convey information about test failures. +// +// We use an interface so we can wrap *testing.T and provide additional functionality. +type TestingT interface { + Log(args ...any) + Logf(format string, args ...any) + + Error(args ...any) + Errorf(format string, args ...any) + + Fatal(args ...any) + Fatalf(format string, args ...any) + + Skip(args ...any) + Skipf(format string, args ...any) + + FailNow() + + Cleanup(func()) + + Setenv(key, value string) + + TempDir() string + + Helper() +} diff --git a/internal/testutil/jdk.go b/internal/testutil/jdk.go index 05bd7d6d6..60fa439db 100644 --- a/internal/testutil/jdk.go +++ b/internal/testutil/jdk.go @@ -5,12 +5,11 @@ import ( "context" "os/exec" "strings" - "testing" "github.com/stretchr/testify/require" ) -func RequireJDK(t *testing.T, ctx context.Context, version string) { +func RequireJDK(t TestingT, ctx context.Context, version string) { var stderr bytes.Buffer cmd := exec.Command("javac", "-version") diff --git a/internal/testutil/requirement.go b/internal/testutil/requirement.go index 53855e0b5..e182b7518 100644 --- a/internal/testutil/requirement.go +++ b/internal/testutil/requirement.go @@ -1,18 +1,14 @@ package testutil -import ( - "testing" -) - // Requirement is the interface for test requirements. type Requirement interface { - Verify(t *testing.T) + Verify(t TestingT) } // Require should be called at the beginning of a test to ensure that all // requirements are met before running the test. // If any requirement is not met, the test will be skipped. 
-func Require(t *testing.T, requirements ...Requirement) { +func Require(t TestingT, requirements ...Requirement) { for _, r := range requirements { r.Verify(t) } diff --git a/internal/testutil/testutil_test.go b/internal/testutil/testutil_test.go new file mode 100644 index 000000000..d41374d55 --- /dev/null +++ b/internal/testutil/testutil_test.go @@ -0,0 +1,36 @@ +package testutil_test + +import ( + "go/parser" + "go/token" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestNoTestingImport checks that no file in the package imports the testing package. +// All exported functions must use the TestingT interface instead of *testing.T. +func TestNoTestingImport(t *testing.T) { + // Parse the package + fset := token.NewFileSet() + pkgs, err := parser.ParseDir(fset, ".", nil, parser.AllErrors) + require.NoError(t, err) + + // Iterate through the files in the package + for _, pkg := range pkgs { + for _, file := range pkg.Files { + // Skip test files + if strings.HasSuffix(fset.Position(file.Pos()).Filename, "_test.go") { + continue + } + // Check the imports of each file + for _, imp := range file.Imports { + if imp.Path.Value == `"testing"` { + assert.Fail(t, "File imports the testing package", "File %s imports the testing package", fset.Position(file.Pos()).Filename) + } + } + } + } +} diff --git a/libs/auth/cache/file_test.go b/libs/auth/cache/file_test.go index 3e4aae36f..54964bed3 100644 --- a/libs/auth/cache/file_test.go +++ b/libs/auth/cache/file_test.go @@ -42,7 +42,7 @@ func TestStoreAndLookup(t *testing.T) { tok, err := l.Lookup("x") require.NoError(t, err) assert.Equal(t, "abc", tok.AccessToken) - assert.Equal(t, 2, len(l.Tokens)) + assert.Len(t, l.Tokens, 2) _, err = l.Lookup("z") assert.Equal(t, ErrNotConfigured, err) diff --git a/libs/auth/callback.go b/libs/auth/callback.go index 5a2400697..3893a5041 100644 --- a/libs/auth/callback.go +++ b/libs/auth/callback.go @@ -53,7 +53,9 @@ func newCallback(ctx context.Context, a *PersistentAuth) (*callbackServer, error a: a, } cb.srv.Handler = cb - go cb.srv.Serve(cb.ln) + go func() { + _ = cb.srv.Serve(cb.ln) + }() return cb, nil } diff --git a/libs/auth/oauth.go b/libs/auth/oauth.go index 026c45468..1037a5a85 100644 --- a/libs/auth/oauth.go +++ b/libs/auth/oauth.go @@ -107,7 +107,7 @@ func (a *PersistentAuth) Load(ctx context.Context) (*oauth2.Token, error) { func (a *PersistentAuth) ProfileName() string { if a.AccountID != "" { - return fmt.Sprintf("ACCOUNT-%s", a.AccountID) + return "ACCOUNT-" + a.AccountID } host := strings.TrimPrefix(a.Host, "https://") split := strings.Split(host, ".") @@ -210,12 +210,12 @@ func (a *PersistentAuth) oidcEndpoints(ctx context.Context) (*oauthAuthorization prefix := a.key() if a.AccountID != "" { return &oauthAuthorizationServer{ - AuthorizationEndpoint: fmt.Sprintf("%s/v1/authorize", prefix), - TokenEndpoint: fmt.Sprintf("%s/v1/token", prefix), + AuthorizationEndpoint: prefix + "/v1/authorize", + TokenEndpoint: prefix + "/v1/token", }, nil } var oauthEndpoints oauthAuthorizationServer - oidc := fmt.Sprintf("%s/oidc/.well-known/oauth-authorization-server", prefix) + oidc := prefix + "/oidc/.well-known/oauth-authorization-server" err := a.http.Do(ctx, "GET", oidc, httpclient.WithResponseUnmarshal(&oauthEndpoints)) if err != nil { return nil, fmt.Errorf("fetch .well-known: %w", err) @@ -247,7 +247,7 @@ func (a *PersistentAuth) oauth2Config(ctx context.Context) (*oauth2.Config, erro TokenURL: endpoints.TokenEndpoint, AuthStyle: 
oauth2.AuthStyleInParams, }, - RedirectURL: fmt.Sprintf("http://%s", appRedirectAddr), + RedirectURL: "http://" + appRedirectAddr, Scopes: scopes, }, nil } @@ -258,7 +258,7 @@ func (a *PersistentAuth) oauth2Config(ctx context.Context) (*oauth2.Config, erro func (a *PersistentAuth) key() string { a.Host = strings.TrimSuffix(a.Host, "/") if !strings.HasPrefix(a.Host, "http") { - a.Host = fmt.Sprintf("https://%s", a.Host) + a.Host = "https://" + a.Host } if a.AccountID != "" { return fmt.Sprintf("%s/oidc/accounts/%s", a.Host, a.AccountID) diff --git a/libs/auth/oauth_test.go b/libs/auth/oauth_test.go index 837ff4fee..6c3b9bf47 100644 --- a/libs/auth/oauth_test.go +++ b/libs/auth/oauth_test.go @@ -112,7 +112,7 @@ func TestLoadRefresh(t *testing.T) { }, }.ApplyClient(t, func(ctx context.Context, c *client.DatabricksClient) { ctx = useInsecureOAuthHttpClientForTests(ctx) - expectedKey := fmt.Sprintf("%s/oidc/accounts/xyz", c.Config.Host) + expectedKey := c.Config.Host + "/oidc/accounts/xyz" p := &PersistentAuth{ Host: c.Config.Host, AccountID: "xyz", @@ -149,7 +149,7 @@ func TestChallenge(t *testing.T) { }, }.ApplyClient(t, func(ctx context.Context, c *client.DatabricksClient) { ctx = useInsecureOAuthHttpClientForTests(ctx) - expectedKey := fmt.Sprintf("%s/oidc/accounts/xyz", c.Config.Host) + expectedKey := c.Config.Host + "/oidc/accounts/xyz" browserOpened := make(chan string) p := &PersistentAuth{ diff --git a/libs/cmdgroup/command_test.go b/libs/cmdgroup/command_test.go index f3e3fe6ab..2c248f09f 100644 --- a/libs/cmdgroup/command_test.go +++ b/libs/cmdgroup/command_test.go @@ -42,7 +42,8 @@ func TestCommandFlagGrouping(t *testing.T) { buf := bytes.NewBuffer(nil) cmd.SetOutput(buf) - cmd.Usage() + err := cmd.Usage() + require.NoError(t, err) expected := `Usage: parent test [flags] diff --git a/libs/cmdio/error_event.go b/libs/cmdio/error_event.go index 933f9d0d0..62897995b 100644 --- a/libs/cmdio/error_event.go +++ b/libs/cmdio/error_event.go @@ -1,13 +1,11 @@ package cmdio -import "fmt" - type ErrorEvent struct { Error string `json:"error"` } func (event *ErrorEvent) String() string { - return fmt.Sprintf("Error: %s", event.Error) + return "Error: " + event.Error } func (event *ErrorEvent) IsInplaceSupported() bool { diff --git a/libs/cmdio/io.go b/libs/cmdio/io.go index 75c0c4b87..c0e9e868a 100644 --- a/libs/cmdio/io.go +++ b/libs/cmdio/io.go @@ -31,9 +31,9 @@ type cmdIO struct { err io.Writer } -func NewIO(outputFormat flags.Output, in io.Reader, out io.Writer, err io.Writer, headerTemplate, template string) *cmdIO { +func NewIO(ctx context.Context, outputFormat flags.Output, in io.Reader, out, err io.Writer, headerTemplate, template string) *cmdIO { // The check below is similar to color.NoColor but uses the specified err writer. 
- dumb := os.Getenv("NO_COLOR") != "" || os.Getenv("TERM") == "dumb" + dumb := env.Get(ctx, "NO_COLOR") != "" || env.Get(ctx, "TERM") == "dumb" if f, ok := err.(*os.File); ok && !dumb { dumb = !isatty.IsTerminal(f.Fd()) && !isatty.IsCygwinTerminal(f.Fd()) } diff --git a/libs/cmdio/logger.go b/libs/cmdio/logger.go index 45b1883ce..7edad5bf0 100644 --- a/libs/cmdio/logger.go +++ b/libs/cmdio/logger.go @@ -4,6 +4,7 @@ import ( "bufio" "context" "encoding/json" + "errors" "fmt" "io" "os" @@ -124,7 +125,7 @@ func splitAtLastNewLine(s string) (string, string) { func (l *Logger) AskSelect(question string, choices []string) (string, error) { if l.Mode == flags.ModeJson { - return "", fmt.Errorf("question prompts are not supported in json mode") + return "", errors.New("question prompts are not supported in json mode") } // Promptui does not support multiline prompts. So we split the question. @@ -140,7 +141,7 @@ func (l *Logger) AskSelect(question string, choices []string) (string, error) { HideHelp: true, Templates: &promptui.SelectTemplates{ Label: "{{.}}: ", - Selected: fmt.Sprintf("%s: {{.}}", last), + Selected: last + ": {{.}}", }, } @@ -151,9 +152,9 @@ func (l *Logger) AskSelect(question string, choices []string) (string, error) { return ans, nil } -func (l *Logger) Ask(question string, defaultVal string) (string, error) { +func (l *Logger) Ask(question, defaultVal string) (string, error) { if l.Mode == flags.ModeJson { - return "", fmt.Errorf("question prompts are not supported in json mode") + return "", errors.New("question prompts are not supported in json mode") } // Add default value to question prompt. @@ -188,29 +189,29 @@ func (l *Logger) writeJson(event Event) { // we panic because there we cannot catch this in jobs.RunNowAndWait panic(err) } - l.Writer.Write([]byte(b)) - l.Writer.Write([]byte("\n")) + _, _ = l.Writer.Write([]byte(b)) + _, _ = l.Writer.Write([]byte("\n")) } func (l *Logger) writeAppend(event Event) { - l.Writer.Write([]byte(event.String())) - l.Writer.Write([]byte("\n")) + _, _ = l.Writer.Write([]byte(event.String())) + _, _ = l.Writer.Write([]byte("\n")) } func (l *Logger) writeInplace(event Event) { if l.isFirstEvent { // save cursor location - l.Writer.Write([]byte("\033[s")) + _, _ = l.Writer.Write([]byte("\033[s")) } // move cursor to saved location - l.Writer.Write([]byte("\033[u")) + _, _ = l.Writer.Write([]byte("\033[u")) // clear from cursor to end of screen - l.Writer.Write([]byte("\033[0J")) + _, _ = l.Writer.Write([]byte("\033[0J")) - l.Writer.Write([]byte(event.String())) - l.Writer.Write([]byte("\n")) + _, _ = l.Writer.Write([]byte(event.String())) + _, _ = l.Writer.Write([]byte("\n")) l.isFirstEvent = false } @@ -234,5 +235,4 @@ func (l *Logger) Log(event Event) { // jobs.RunNowAndWait panic("unknown progress logger mode: " + l.Mode.String()) } - } diff --git a/libs/cmdio/render.go b/libs/cmdio/render.go index c68ddca0d..1a6aadcfa 100644 --- a/libs/cmdio/render.go +++ b/libs/cmdio/render.go @@ -39,7 +39,7 @@ func Heredoc(tmpl string) (trimmed string) { break } } - for i := 0; i < len(lines); i++ { + for i := range lines { if lines[i] == "" || strings.TrimSpace(lines[i]) == "" { continue } @@ -361,7 +361,9 @@ func renderUsingTemplate(ctx context.Context, r templateRenderer, w io.Writer, h if err != nil { return err } - tw.Write([]byte("\n")) + if _, err := tw.Write([]byte("\n")); err != nil { + return err + } // Do not flush here. Instead, allow the first 100 resources to determine the initial spacing of the header columns. 
} t, err := base.Parse(tmpl) diff --git a/libs/cmdio/render_test.go b/libs/cmdio/render_test.go index 6bde446c4..51b385b1d 100644 --- a/libs/cmdio/render_test.go +++ b/libs/cmdio/render_test.go @@ -55,7 +55,7 @@ func (d *dummyIterator) Next(ctx context.Context) (*provisioning.Workspace, erro func makeWorkspaces(count int) []*provisioning.Workspace { res := make([]*provisioning.Workspace, 0, count) next := []*provisioning.Workspace{&dummyWorkspace1, &dummyWorkspace2} - for i := 0; i < count; i++ { + for range count { n := next[0] next = append(next[1:], n) res = append(res, n) @@ -74,7 +74,7 @@ func makeIterator(count int) listing.Iterator[*provisioning.Workspace] { func makeBigOutput(count int) string { res := bytes.Buffer{} for _, ws := range makeWorkspaces(count) { - res.Write([]byte(fmt.Sprintf("%d %s\n", ws.WorkspaceId, ws.WorkspaceName))) + res.WriteString(fmt.Sprintf("%d %s\n", ws.WorkspaceId, ws.WorkspaceName)) } return res.String() } @@ -171,8 +171,9 @@ func TestRender(t *testing.T) { for _, c := range testCases { t.Run(c.name, func(t *testing.T) { output := &bytes.Buffer{} - cmdIO := NewIO(c.outputFormat, nil, output, output, c.headerTemplate, c.template) - ctx := InContext(context.Background(), cmdIO) + ctx := context.Background() + cmdIO := NewIO(ctx, c.outputFormat, nil, output, output, c.headerTemplate, c.template) + ctx = InContext(ctx, cmdIO) var err error if vv, ok := c.v.(listing.Iterator[*provisioning.Workspace]); ok { err = RenderIterator(ctx, vv) diff --git a/libs/databrickscfg/cfgpickers/clusters.go b/libs/databrickscfg/cfgpickers/clusters.go index cac1b08a7..e27d13690 100644 --- a/libs/databrickscfg/cfgpickers/clusters.go +++ b/libs/databrickscfg/cfgpickers/clusters.go @@ -18,8 +18,10 @@ import ( var minUcRuntime = canonicalVersion("v12.0") -var dbrVersionRegex = regexp.MustCompile(`^(\d+\.\d+)\.x-.*`) -var dbrSnapshotVersionRegex = regexp.MustCompile(`^(\d+)\.x-snapshot.*`) +var ( + dbrVersionRegex = regexp.MustCompile(`^(\d+\.\d+)\.x-.*`) + dbrSnapshotVersionRegex = regexp.MustCompile(`^(\d+)\.x-snapshot.*`) +) func canonicalVersion(v string) string { return semver.Canonical("v" + strings.TrimPrefix(v, "v")) @@ -31,7 +33,7 @@ func GetRuntimeVersion(cluster compute.ClusterDetails) (string, bool) { match = dbrSnapshotVersionRegex.FindStringSubmatch(cluster.SparkVersion) if len(match) > 1 { // we return 14.999 for 14.x-snapshot for semver.Compare() to work properly - return fmt.Sprintf("%s.999", match[1]), true + return match[1] + ".999", true } return "", false } diff --git a/libs/databrickscfg/cfgpickers/clusters_test.go b/libs/databrickscfg/cfgpickers/clusters_test.go index d17e86d4a..cde09aa44 100644 --- a/libs/databrickscfg/cfgpickers/clusters_test.go +++ b/libs/databrickscfg/cfgpickers/clusters_test.go @@ -115,7 +115,7 @@ func TestFirstCompatibleCluster(t *testing.T) { w := databricks.Must(databricks.NewWorkspaceClient((*databricks.Config)(cfg))) ctx := context.Background() - ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) + ctx = cmdio.InContext(ctx, cmdio.NewIO(ctx, flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) clusterID, err := AskForCluster(ctx, w, WithDatabricksConnect("13.1")) require.NoError(t, err) require.Equal(t, "bcd-id", clusterID) @@ -162,7 +162,7 @@ func TestNoCompatibleClusters(t *testing.T) { w := databricks.Must(databricks.NewWorkspaceClient((*databricks.Config)(cfg))) ctx := context.Background() - ctx = cmdio.InContext(ctx, 
cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) + ctx = cmdio.InContext(ctx, cmdio.NewIO(ctx, flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) _, err := AskForCluster(ctx, w, WithDatabricksConnect("13.1")) require.Equal(t, ErrNoCompatibleClusters, err) } diff --git a/libs/databrickscfg/loader.go b/libs/databrickscfg/loader.go index 12a516c59..84c8398bf 100644 --- a/libs/databrickscfg/loader.go +++ b/libs/databrickscfg/loader.go @@ -19,7 +19,7 @@ var errNoMatchingProfiles = errors.New("no matching config profiles found") type errMultipleProfiles []string func (e errMultipleProfiles) Error() string { - return fmt.Sprintf("multiple profiles matched: %s", strings.Join(e, ", ")) + return "multiple profiles matched: " + strings.Join(e, ", ") } func findMatchingProfile(configFile *config.File, matcher func(*ini.Section) bool) (*ini.Section, error) { diff --git a/libs/databrickscfg/ops_test.go b/libs/databrickscfg/ops_test.go index 3ea92024c..dd8484fb7 100644 --- a/libs/databrickscfg/ops_test.go +++ b/libs/databrickscfg/ops_test.go @@ -216,7 +216,7 @@ func TestSaveToProfile_ClearingPreviousProfile(t *testing.T) { dlft, err := file.GetSection("DEFAULT") assert.NoError(t, err) - assert.Len(t, dlft.KeysHash(), 0) + assert.Empty(t, dlft.KeysHash()) abc, err := file.GetSection("abc") assert.NoError(t, err) diff --git a/libs/databrickscfg/profile/file_test.go b/libs/databrickscfg/profile/file_test.go index 8e5cfefc0..6bcaec4b7 100644 --- a/libs/databrickscfg/profile/file_test.go +++ b/libs/databrickscfg/profile/file_test.go @@ -11,10 +11,10 @@ import ( ) func TestProfileCloud(t *testing.T) { - assert.Equal(t, Profile{Host: "https://dbc-XXXXXXXX-YYYY.cloud.databricks.com"}.Cloud(), "AWS") - assert.Equal(t, Profile{Host: "https://adb-xxx.y.azuredatabricks.net/"}.Cloud(), "Azure") - assert.Equal(t, Profile{Host: "https://workspace.gcp.databricks.com/"}.Cloud(), "GCP") - assert.Equal(t, Profile{Host: "https://some.invalid.host.com/"}.Cloud(), "AWS") + assert.Equal(t, "AWS", Profile{Host: "https://dbc-XXXXXXXX-YYYY.cloud.databricks.com"}.Cloud()) + assert.Equal(t, "Azure", Profile{Host: "https://adb-xxx.y.azuredatabricks.net/"}.Cloud()) + assert.Equal(t, "GCP", Profile{Host: "https://workspace.gcp.databricks.com/"}.Cloud()) + assert.Equal(t, "AWS", Profile{Host: "https://some.invalid.host.com/"}.Cloud()) } func TestProfilesSearchCaseInsensitive(t *testing.T) { diff --git a/libs/dyn/convert/from_typed.go b/libs/dyn/convert/from_typed.go index cd92ad0eb..3de017b75 100644 --- a/libs/dyn/convert/from_typed.go +++ b/libs/dyn/convert/from_typed.go @@ -126,7 +126,7 @@ func fromTypedStruct(src reflect.Value, ref dyn.Value, options ...fromTypedOptio // Either if the key was set in the reference or the field is not zero-valued, we include it. if ok || nv.Kind() != dyn.KindNil { - out.Set(refk, nv) + out.Set(refk, nv) // nolint:errcheck } } @@ -184,7 +184,7 @@ func fromTypedMap(src reflect.Value, ref dyn.Value) (dyn.Value, error) { // Every entry is represented, even if it is a nil. // Otherwise, a map with zero-valued structs would yield a nil as well. 
- out.Set(refk, nv) + out.Set(refk, nv) //nolint:errcheck } return dyn.V(out), nil @@ -209,7 +209,7 @@ func fromTypedSlice(src reflect.Value, ref dyn.Value) (dyn.Value, error) { } out := make([]dyn.Value, src.Len()) - for i := 0; i < src.Len(); i++ { + for i := range src.Len() { v := src.Index(i) refv := ref.Index(i) diff --git a/libs/dyn/convert/from_typed_test.go b/libs/dyn/convert/from_typed_test.go index 0cddff3be..8a05bfb38 100644 --- a/libs/dyn/convert/from_typed_test.go +++ b/libs/dyn/convert/from_typed_test.go @@ -325,7 +325,7 @@ func TestFromTypedMapNil(t *testing.T) { } func TestFromTypedMapEmpty(t *testing.T) { - var src = map[string]string{} + src := map[string]string{} ref := dyn.V(map[string]dyn.Value{ "foo": dyn.V("bar"), @@ -338,7 +338,7 @@ func TestFromTypedMapEmpty(t *testing.T) { } func TestFromTypedMapNonEmpty(t *testing.T) { - var src = map[string]string{ + src := map[string]string{ "foo": "foo", "bar": "bar", } @@ -353,7 +353,7 @@ func TestFromTypedMapNonEmpty(t *testing.T) { } func TestFromTypedMapNonEmptyRetainLocation(t *testing.T) { - var src = map[string]string{ + src := map[string]string{ "foo": "bar", "bar": "qux", } @@ -372,7 +372,7 @@ func TestFromTypedMapNonEmptyRetainLocation(t *testing.T) { } func TestFromTypedMapFieldWithZeroValue(t *testing.T) { - var src = map[string]string{ + src := map[string]string{ "foo": "", } @@ -398,7 +398,7 @@ func TestFromTypedSliceNil(t *testing.T) { } func TestFromTypedSliceEmpty(t *testing.T) { - var src = []string{} + src := []string{} ref := dyn.V([]dyn.Value{ dyn.V("bar"), @@ -411,7 +411,7 @@ func TestFromTypedSliceEmpty(t *testing.T) { } func TestFromTypedSliceNonEmpty(t *testing.T) { - var src = []string{ + src := []string{ "foo", "bar", } @@ -426,7 +426,7 @@ func TestFromTypedSliceNonEmpty(t *testing.T) { } func TestFromTypedSliceNonEmptyRetainLocation(t *testing.T) { - var src = []string{ + src := []string{ "foo", "bar", } @@ -446,7 +446,7 @@ func TestFromTypedSliceNonEmptyRetainLocation(t *testing.T) { func TestFromTypedStringEmpty(t *testing.T) { var src string - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NilValue, nv) @@ -454,7 +454,7 @@ func TestFromTypedStringEmpty(t *testing.T) { func TestFromTypedStringEmptyOverwrite(t *testing.T) { var src string - var ref = dyn.V("old") + ref := dyn.V("old") nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(""), nv) @@ -462,7 +462,7 @@ func TestFromTypedStringEmptyOverwrite(t *testing.T) { func TestFromTypedStringNonEmpty(t *testing.T) { var src string = "new" - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V("new"), nv) @@ -470,14 +470,14 @@ func TestFromTypedStringNonEmpty(t *testing.T) { func TestFromTypedStringNonEmptyOverwrite(t *testing.T) { var src string = "new" - var ref = dyn.V("old") + ref := dyn.V("old") nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V("new"), nv) } func TestFromTypedStringRetainsLocations(t *testing.T) { - var ref = dyn.NewValue("foo", []dyn.Location{{File: "foo"}}) + ref := dyn.NewValue("foo", []dyn.Location{{File: "foo"}}) // case: value has not been changed var src string = "foo" @@ -494,14 +494,14 @@ func TestFromTypedStringRetainsLocations(t *testing.T) { func TestFromTypedStringTypeError(t *testing.T) { var src string = "foo" - var ref = dyn.V(1234) + ref := dyn.V(1234) _, err := FromTyped(src, ref) require.Error(t, err) } func 
TestFromTypedBoolEmpty(t *testing.T) { var src bool - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NilValue, nv) @@ -509,7 +509,7 @@ func TestFromTypedBoolEmpty(t *testing.T) { func TestFromTypedBoolEmptyOverwrite(t *testing.T) { var src bool - var ref = dyn.V(true) + ref := dyn.V(true) nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(false), nv) @@ -517,7 +517,7 @@ func TestFromTypedBoolEmptyOverwrite(t *testing.T) { func TestFromTypedBoolNonEmpty(t *testing.T) { var src bool = true - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(true), nv) @@ -525,14 +525,14 @@ func TestFromTypedBoolNonEmpty(t *testing.T) { func TestFromTypedBoolNonEmptyOverwrite(t *testing.T) { var src bool = true - var ref = dyn.V(false) + ref := dyn.V(false) nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(true), nv) } func TestFromTypedBoolRetainsLocations(t *testing.T) { - var ref = dyn.NewValue(true, []dyn.Location{{File: "foo"}}) + ref := dyn.NewValue(true, []dyn.Location{{File: "foo"}}) // case: value has not been changed var src bool = true @@ -549,7 +549,7 @@ func TestFromTypedBoolRetainsLocations(t *testing.T) { func TestFromTypedBoolVariableReference(t *testing.T) { var src bool = true - var ref = dyn.V("${var.foo}") + ref := dyn.V("${var.foo}") nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V("${var.foo}"), nv) @@ -557,14 +557,14 @@ func TestFromTypedBoolVariableReference(t *testing.T) { func TestFromTypedBoolTypeError(t *testing.T) { var src bool = true - var ref = dyn.V("string") + ref := dyn.V("string") _, err := FromTyped(src, ref) require.Error(t, err) } func TestFromTypedIntEmpty(t *testing.T) { var src int - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NilValue, nv) @@ -572,7 +572,7 @@ func TestFromTypedIntEmpty(t *testing.T) { func TestFromTypedIntEmptyOverwrite(t *testing.T) { var src int - var ref = dyn.V(1234) + ref := dyn.V(1234) nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(int64(0)), nv) @@ -580,7 +580,7 @@ func TestFromTypedIntEmptyOverwrite(t *testing.T) { func TestFromTypedIntNonEmpty(t *testing.T) { var src int = 1234 - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(int64(1234)), nv) @@ -588,14 +588,14 @@ func TestFromTypedIntNonEmpty(t *testing.T) { func TestFromTypedIntNonEmptyOverwrite(t *testing.T) { var src int = 1234 - var ref = dyn.V(1233) + ref := dyn.V(1233) nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(int64(1234)), nv) } func TestFromTypedIntRetainsLocations(t *testing.T) { - var ref = dyn.NewValue(1234, []dyn.Location{{File: "foo"}}) + ref := dyn.NewValue(1234, []dyn.Location{{File: "foo"}}) // case: value has not been changed var src int = 1234 @@ -612,7 +612,7 @@ func TestFromTypedIntRetainsLocations(t *testing.T) { func TestFromTypedIntVariableReference(t *testing.T) { var src int = 1234 - var ref = dyn.V("${var.foo}") + ref := dyn.V("${var.foo}") nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V("${var.foo}"), nv) @@ -620,14 +620,14 @@ func TestFromTypedIntVariableReference(t *testing.T) { func TestFromTypedIntTypeError(t *testing.T) { var src int = 1234 - var ref = dyn.V("string") + ref := dyn.V("string") _, 
err := FromTyped(src, ref) require.Error(t, err) } func TestFromTypedFloatEmpty(t *testing.T) { var src float64 - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NilValue, nv) @@ -635,7 +635,7 @@ func TestFromTypedFloatEmpty(t *testing.T) { func TestFromTypedFloatEmptyOverwrite(t *testing.T) { var src float64 - var ref = dyn.V(1.23) + ref := dyn.V(1.23) nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(0.0), nv) @@ -643,7 +643,7 @@ func TestFromTypedFloatEmptyOverwrite(t *testing.T) { func TestFromTypedFloatNonEmpty(t *testing.T) { var src float64 = 1.23 - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(1.23), nv) @@ -651,7 +651,7 @@ func TestFromTypedFloatNonEmpty(t *testing.T) { func TestFromTypedFloatNonEmptyOverwrite(t *testing.T) { var src float64 = 1.23 - var ref = dyn.V(1.24) + ref := dyn.V(1.24) nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(1.23), nv) @@ -659,7 +659,7 @@ func TestFromTypedFloatNonEmptyOverwrite(t *testing.T) { func TestFromTypedFloatRetainsLocations(t *testing.T) { var src float64 - var ref = dyn.NewValue(1.23, []dyn.Location{{File: "foo"}}) + ref := dyn.NewValue(1.23, []dyn.Location{{File: "foo"}}) // case: value has not been changed src = 1.23 @@ -676,7 +676,7 @@ func TestFromTypedFloatRetainsLocations(t *testing.T) { func TestFromTypedFloatVariableReference(t *testing.T) { var src float64 = 1.23 - var ref = dyn.V("${var.foo}") + ref := dyn.V("${var.foo}") nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V("${var.foo}"), nv) @@ -684,7 +684,7 @@ func TestFromTypedFloatVariableReference(t *testing.T) { func TestFromTypedFloatTypeError(t *testing.T) { var src float64 = 1.23 - var ref = dyn.V("string") + ref := dyn.V("string") _, err := FromTyped(src, ref) require.Error(t, err) } @@ -727,7 +727,7 @@ func TestFromTypedAny(t *testing.T) { func TestFromTypedAnyNil(t *testing.T) { var src any = nil - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NilValue, nv) diff --git a/libs/dyn/convert/normalize.go b/libs/dyn/convert/normalize.go index 106add35d..ee26d5afc 100644 --- a/libs/dyn/convert/normalize.go +++ b/libs/dyn/convert/normalize.go @@ -97,7 +97,7 @@ func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value, seen if !pv.IsAnchor() { diags = diags.Append(diag.Diagnostic{ Severity: diag.Warning, - Summary: fmt.Sprintf("unknown field: %s", pk.MustString()), + Summary: "unknown field: " + pk.MustString(), // Show all locations the unknown field is defined at. Locations: pk.Locations(), Paths: []dyn.Path{path}, @@ -116,7 +116,7 @@ func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value, seen } } - out.Set(pk, nv) + out.Set(pk, nv) //nolint:errcheck } // Return the normalized value if missing fields are not included. 
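`dyn.Mapping.Set` returns an error only when the key is not a string, so the hunks around it take one of two routes: call sites that build keys from string literals with `dyn.V` silence errcheck inline, while tests assert the error with `require.NoError`. A rough sketch of both forms, with made-up key and value names:

```go
package sketch

import "github.com/databricks/cli/libs/dyn"

func buildMapping() (dyn.Mapping, error) {
	m := dyn.NewMapping()

	// Checked form, as used in the updated tests.
	if err := m.Set(dyn.V("foo"), dyn.V("bar")); err != nil {
		return dyn.Mapping{}, err
	}

	// Silenced form: the key is a string literal, so Set cannot fail here.
	m.Set(dyn.V("baz"), dyn.V("qux")) //nolint:errcheck

	return m, nil
}
```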
@@ -162,7 +162,7 @@ func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value, seen continue } if v.IsValid() { - out.Set(dyn.V(k), v) + out.Set(dyn.V(k), v) // nolint:errcheck } } @@ -201,7 +201,7 @@ func (n normalizeOptions) normalizeMap(typ reflect.Type, src dyn.Value, seen []r } } - out.Set(pk, nv) + out.Set(pk, nv) //nolint:errcheck } return dyn.NewValue(out, src.Locations()), diags diff --git a/libs/dyn/convert/normalize_test.go b/libs/dyn/convert/normalize_test.go index ab0a1cec1..449c09075 100644 --- a/libs/dyn/convert/normalize_test.go +++ b/libs/dyn/convert/normalize_test.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" assert "github.com/databricks/cli/libs/dyn/dynassert" + "github.com/stretchr/testify/require" ) func TestNormalizeStruct(t *testing.T) { @@ -20,8 +21,8 @@ func TestNormalizeStruct(t *testing.T) { "bar": dyn.V("baz"), }) - vout, err := Normalize(typ, vin) - assert.Empty(t, err) + vout, diags := Normalize(typ, vin) + assert.Empty(t, diags) assert.Equal(t, vin, vout) } @@ -37,14 +38,14 @@ func TestNormalizeStructElementDiagnostic(t *testing.T) { "bar": dyn.V(map[string]dyn.Value{"an": dyn.V("error")}), }) - vout, err := Normalize(typ, vin) - assert.Len(t, err, 1) + vout, diags := Normalize(typ, vin) + assert.Len(t, diags, 1) assert.Equal(t, diag.Diagnostic{ Severity: diag.Warning, Summary: `expected string, found map`, Locations: []dyn.Location{{}}, Paths: []dyn.Path{dyn.NewPath(dyn.Key("bar"))}, - }, err[0]) + }, diags[0]) // Elements that encounter an error during normalization are dropped. assert.Equal(t, map[string]any{ @@ -60,17 +61,20 @@ func TestNormalizeStructUnknownField(t *testing.T) { var typ Tmp m := dyn.NewMapping() - m.Set(dyn.V("foo"), dyn.V("val-foo")) + err := m.Set(dyn.V("foo"), dyn.V("val-foo")) + require.NoError(t, err) + // Set the unknown field, with location information. - m.Set(dyn.NewValue("bar", []dyn.Location{ + err = m.Set(dyn.NewValue("bar", []dyn.Location{ {File: "hello.yaml", Line: 1, Column: 1}, {File: "world.yaml", Line: 2, Column: 2}, }), dyn.V("var-bar")) + require.NoError(t, err) vin := dyn.V(m) - vout, err := Normalize(typ, vin) - assert.Len(t, err, 1) + vout, diags := Normalize(typ, vin) + assert.Len(t, diags, 1) assert.Equal(t, diag.Diagnostic{ Severity: diag.Warning, Summary: `unknown field: bar`, @@ -80,7 +84,7 @@ func TestNormalizeStructUnknownField(t *testing.T) { {File: "world.yaml", Line: 2, Column: 2}, }, Paths: []dyn.Path{dyn.EmptyPath}, - }, err[0]) + }, diags[0]) // The field that can be mapped to the struct field is retained. assert.Equal(t, map[string]any{ diff --git a/libs/dyn/convert/struct_info.go b/libs/dyn/convert/struct_info.go index dc3ed4da4..1e34008e2 100644 --- a/libs/dyn/convert/struct_info.go +++ b/libs/dyn/convert/struct_info.go @@ -43,7 +43,7 @@ func getStructInfo(typ reflect.Type) structInfo { // buildStructInfo populates a new [structInfo] for the given type. func buildStructInfo(typ reflect.Type) structInfo { - var out = structInfo{ + out := structInfo{ Fields: make(map[string][]int), } @@ -65,7 +65,7 @@ func buildStructInfo(typ reflect.Type) structInfo { } nf := styp.NumField() - for j := 0; j < nf; j++ { + for j := range nf { sf := styp.Field(j) // Recurse into anonymous fields. 
@@ -102,7 +102,7 @@ func buildStructInfo(typ reflect.Type) structInfo { } func (s *structInfo) FieldValues(v reflect.Value) map[string]reflect.Value { - var out = make(map[string]reflect.Value) + out := make(map[string]reflect.Value) for k, index := range s.Fields { fv := v diff --git a/libs/dyn/convert/struct_info_test.go b/libs/dyn/convert/struct_info_test.go index 20348ff60..bc10db9da 100644 --- a/libs/dyn/convert/struct_info_test.go +++ b/libs/dyn/convert/struct_info_test.go @@ -95,7 +95,7 @@ func TestStructInfoFieldValues(t *testing.T) { Bar string `json:"bar"` } - var src = Tmp{ + src := Tmp{ Foo: "foo", Bar: "bar", } @@ -121,7 +121,7 @@ func TestStructInfoFieldValuesAnonymousByValue(t *testing.T) { Foo } - var src = Tmp{ + src := Tmp{ Foo: Foo{ Foo: "foo", Bar: Bar{ diff --git a/libs/dyn/convert/to_typed_test.go b/libs/dyn/convert/to_typed_test.go index 78221c299..4a56dd4fc 100644 --- a/libs/dyn/convert/to_typed_test.go +++ b/libs/dyn/convert/to_typed_test.go @@ -44,7 +44,7 @@ func TestToTypedStructOverwrite(t *testing.T) { Qux string `json:"-"` } - var out = Tmp{ + out := Tmp{ Foo: "baz", Bar: "qux", } @@ -66,7 +66,7 @@ func TestToTypedStructClearFields(t *testing.T) { } // Struct value with non-empty fields. - var out = Tmp{ + out := Tmp{ Foo: "baz", Bar: "qux", } @@ -137,7 +137,7 @@ func TestToTypedStructNil(t *testing.T) { Foo string `json:"foo"` } - var out = Tmp{} + out := Tmp{} err := ToTyped(&out, dyn.NilValue) require.NoError(t, err) assert.Equal(t, Tmp{}, out) @@ -148,7 +148,7 @@ func TestToTypedStructNilOverwrite(t *testing.T) { Foo string `json:"foo"` } - var out = Tmp{"bar"} + out := Tmp{"bar"} err := ToTyped(&out, dyn.NilValue) require.NoError(t, err) assert.Equal(t, Tmp{}, out) @@ -173,7 +173,7 @@ func TestToTypedStructWithValueField(t *testing.T) { } func TestToTypedMap(t *testing.T) { - var out = map[string]string{} + out := map[string]string{} v := dyn.V(map[string]dyn.Value{ "key": dyn.V("value"), @@ -186,7 +186,7 @@ func TestToTypedMap(t *testing.T) { } func TestToTypedMapOverwrite(t *testing.T) { - var out = map[string]string{ + out := map[string]string{ "foo": "bar", } @@ -214,14 +214,14 @@ func TestToTypedMapWithPointerElement(t *testing.T) { } func TestToTypedMapNil(t *testing.T) { - var out = map[string]string{} + out := map[string]string{} err := ToTyped(&out, dyn.NilValue) require.NoError(t, err) assert.Nil(t, out) } func TestToTypedMapNilOverwrite(t *testing.T) { - var out = map[string]string{ + out := map[string]string{ "foo": "bar", } err := ToTyped(&out, dyn.NilValue) @@ -245,7 +245,7 @@ func TestToTypedSlice(t *testing.T) { } func TestToTypedSliceOverwrite(t *testing.T) { - var out = []string{"qux"} + out := []string{"qux"} v := dyn.V([]dyn.Value{ dyn.V("foo"), @@ -282,7 +282,7 @@ func TestToTypedSliceNil(t *testing.T) { } func TestToTypedSliceNilOverwrite(t *testing.T) { - var out = []string{"foo"} + out := []string{"foo"} err := ToTyped(&out, dyn.NilValue) require.NoError(t, err) assert.Nil(t, out) diff --git a/libs/dyn/dynassert/assert.go b/libs/dyn/dynassert/assert.go index ebdba1214..616a588ec 100644 --- a/libs/dyn/dynassert/assert.go +++ b/libs/dyn/dynassert/assert.go @@ -5,7 +5,7 @@ import ( "github.com/stretchr/testify/assert" ) -func Equal(t assert.TestingT, expected any, actual any, msgAndArgs ...any) bool { +func Equal(t assert.TestingT, expected, actual any, msgAndArgs ...any) bool { ev, eok := expected.(dyn.Value) av, aok := actual.(dyn.Value) if eok && aok && ev.IsValid() && av.IsValid() { @@ -36,7 +36,7 @@ func EqualValues(t 
assert.TestingT, expected, actual any, msgAndArgs ...any) boo return assert.EqualValues(t, expected, actual, msgAndArgs...) } -func NotEqual(t assert.TestingT, expected any, actual any, msgAndArgs ...any) bool { +func NotEqual(t assert.TestingT, expected, actual any, msgAndArgs ...any) bool { return assert.NotEqual(t, expected, actual, msgAndArgs...) } @@ -84,11 +84,11 @@ func False(t assert.TestingT, value bool, msgAndArgs ...any) bool { return assert.False(t, value, msgAndArgs...) } -func Contains(t assert.TestingT, list any, element any, msgAndArgs ...any) bool { +func Contains(t assert.TestingT, list, element any, msgAndArgs ...any) bool { return assert.Contains(t, list, element, msgAndArgs...) } -func NotContains(t assert.TestingT, list any, element any, msgAndArgs ...any) bool { +func NotContains(t assert.TestingT, list, element any, msgAndArgs ...any) bool { return assert.NotContains(t, list, element, msgAndArgs...) } @@ -112,6 +112,6 @@ func NotPanics(t assert.TestingT, f func(), msgAndArgs ...any) bool { return assert.NotPanics(t, f, msgAndArgs...) } -func JSONEq(t assert.TestingT, expected string, actual string, msgAndArgs ...any) bool { +func JSONEq(t assert.TestingT, expected, actual string, msgAndArgs ...any) bool { return assert.JSONEq(t, expected, actual, msgAndArgs...) } diff --git a/libs/dyn/dynassert/assert_test.go b/libs/dyn/dynassert/assert_test.go index 43258bd20..c8c2d6960 100644 --- a/libs/dyn/dynassert/assert_test.go +++ b/libs/dyn/dynassert/assert_test.go @@ -13,7 +13,7 @@ import ( ) func TestThatThisTestPackageIsUsed(t *testing.T) { - var base = ".." + base := ".." var files []string err := fs.WalkDir(os.DirFS(base), ".", func(path string, d fs.DirEntry, err error) error { if d.IsDir() { diff --git a/libs/dyn/jsonloader/json.go b/libs/dyn/jsonloader/json.go index cbf539263..708fc401f 100644 --- a/libs/dyn/jsonloader/json.go +++ b/libs/dyn/jsonloader/json.go @@ -3,6 +3,7 @@ package jsonloader import ( "bytes" "encoding/json" + "errors" "fmt" "io" @@ -20,7 +21,7 @@ func LoadJSON(data []byte, source string) (dyn.Value, error) { value, err := decodeValue(decoder, &offset) if err != nil { if err == io.EOF { - err = fmt.Errorf("unexpected end of JSON input") + err = errors.New("unexpected end of JSON input") } return dyn.InvalidValue, fmt.Errorf("error decoding JSON at %s: %v", value.Location(), err) } @@ -57,7 +58,7 @@ func decodeValue(decoder *json.Decoder, o *Offset) (dyn.Value, error) { } key, ok := keyToken.(string) if !ok { - return invalidValueWithLocation(decoder, o), fmt.Errorf("expected string for object key") + return invalidValueWithLocation(decoder, o), errors.New("expected string for object key") } // Get the offset of the key by subtracting the length of the key and the '"' character @@ -70,7 +71,7 @@ func decodeValue(decoder *json.Decoder, o *Offset) (dyn.Value, error) { return invalidValueWithLocation(decoder, o), err } - obj.Set(keyVal, val) + obj.Set(keyVal, val) //nolint:errcheck } // Consume the closing '}' if _, err := decoder.Token(); err != nil { diff --git a/libs/dyn/jsonsaver/marshal_test.go b/libs/dyn/jsonsaver/marshal_test.go index 0b6a34283..e8897ea49 100644 --- a/libs/dyn/jsonsaver/marshal_test.go +++ b/libs/dyn/jsonsaver/marshal_test.go @@ -5,6 +5,7 @@ import ( "github.com/databricks/cli/libs/dyn" assert "github.com/databricks/cli/libs/dyn/dynassert" + "github.com/stretchr/testify/require" ) func TestMarshal_String(t *testing.T) { @@ -44,8 +45,8 @@ func TestMarshal_Time(t *testing.T) { func TestMarshal_Map(t *testing.T) { m := 
dyn.NewMapping() - m.Set(dyn.V("key1"), dyn.V("value1")) - m.Set(dyn.V("key2"), dyn.V("value2")) + require.NoError(t, m.Set(dyn.V("key1"), dyn.V("value1"))) + require.NoError(t, m.Set(dyn.V("key2"), dyn.V("value2"))) b, err := Marshal(dyn.V(m)) if assert.NoError(t, err) { @@ -66,16 +67,16 @@ func TestMarshal_Sequence(t *testing.T) { func TestMarshal_Complex(t *testing.T) { map1 := dyn.NewMapping() - map1.Set(dyn.V("str1"), dyn.V("value1")) - map1.Set(dyn.V("str2"), dyn.V("value2")) + require.NoError(t, map1.Set(dyn.V("str1"), dyn.V("value1"))) + require.NoError(t, map1.Set(dyn.V("str2"), dyn.V("value2"))) seq1 := []dyn.Value{} seq1 = append(seq1, dyn.V("value1")) seq1 = append(seq1, dyn.V("value2")) root := dyn.NewMapping() - root.Set(dyn.V("map1"), dyn.V(map1)) - root.Set(dyn.V("seq1"), dyn.V(seq1)) + require.NoError(t, root.Set(dyn.V("map1"), dyn.V(map1))) + require.NoError(t, root.Set(dyn.V("seq1"), dyn.V(seq1))) // Marshal without indent. b, err := Marshal(dyn.V(root)) diff --git a/libs/dyn/location.go b/libs/dyn/location.go index 961d2f121..d2b2ad596 100644 --- a/libs/dyn/location.go +++ b/libs/dyn/location.go @@ -1,6 +1,7 @@ package dyn import ( + "errors" "fmt" "path/filepath" ) @@ -17,7 +18,7 @@ func (l Location) String() string { func (l Location) Directory() (string, error) { if l.File == "" { - return "", fmt.Errorf("no file in location") + return "", errors.New("no file in location") } return filepath.Dir(l.File), nil diff --git a/libs/dyn/mapping.go b/libs/dyn/mapping.go index f9f2d2e97..3c7c4e96e 100644 --- a/libs/dyn/mapping.go +++ b/libs/dyn/mapping.go @@ -41,7 +41,7 @@ func newMappingWithSize(size int) Mapping { func newMappingFromGoMap(vin map[string]Value) Mapping { m := newMappingWithSize(len(vin)) for k, v := range vin { - m.Set(V(k), v) + m.Set(V(k), v) //nolint:errcheck } return m } @@ -94,7 +94,7 @@ func (m *Mapping) GetByString(skey string) (Value, bool) { // If the key already exists, the value is updated. // If the key does not exist, a new key-value pair is added. // The key must be a string, otherwise an error is returned. -func (m *Mapping) Set(key Value, value Value) error { +func (m *Mapping) Set(key, value Value) error { skey, ok := key.AsString() if !ok { return fmt.Errorf("key must be a string, got %s", key.Kind()) @@ -144,6 +144,6 @@ func (m Mapping) Clone() Mapping { // Merge merges the key-value pairs from another Mapping into the current Mapping. 
func (m *Mapping) Merge(n Mapping) { for _, p := range n.pairs { - m.Set(p.Key, p.Value) + m.Set(p.Key, p.Value) //nolint:errcheck } } diff --git a/libs/dyn/mapping_test.go b/libs/dyn/mapping_test.go index 43b24b0c5..d0347d22a 100644 --- a/libs/dyn/mapping_test.go +++ b/libs/dyn/mapping_test.go @@ -1,7 +1,7 @@ package dyn_test import ( - "fmt" + "strconv" "testing" "github.com/databricks/cli/libs/dyn" @@ -185,14 +185,14 @@ func TestMappingClone(t *testing.T) { func TestMappingMerge(t *testing.T) { var m1 dyn.Mapping - for i := 0; i < 10; i++ { - err := m1.Set(dyn.V(fmt.Sprintf("%d", i)), dyn.V(i)) + for i := range 10 { + err := m1.Set(dyn.V(strconv.Itoa(i)), dyn.V(i)) require.NoError(t, err) } var m2 dyn.Mapping for i := 5; i < 15; i++ { - err := m2.Set(dyn.V(fmt.Sprintf("%d", i)), dyn.V(i)) + err := m2.Set(dyn.V(strconv.Itoa(i)), dyn.V(i)) require.NoError(t, err) } diff --git a/libs/dyn/merge/merge.go b/libs/dyn/merge/merge.go index 29decd779..72d9a7d28 100644 --- a/libs/dyn/merge/merge.go +++ b/libs/dyn/merge/merge.go @@ -88,10 +88,10 @@ func mergeMap(a, b dyn.Value) (dyn.Value, error) { if err != nil { return dyn.InvalidValue, err } - out.Set(pk, merged) + out.Set(pk, merged) //nolint:errcheck } else { // Otherwise, just set the value. - out.Set(pk, pv) + out.Set(pk, pv) //nolint:errcheck } } @@ -111,6 +111,7 @@ func mergeSequence(a, b dyn.Value) (dyn.Value, error) { // Preserve the location of the first value. Accumulate the locations of the second value. return dyn.NewValue(out, a.Locations()).AppendLocationsFromValue(b), nil } + func mergePrimitive(a, b dyn.Value) (dyn.Value, error) { // Merging primitive values means using the incoming value. return b.AppendLocationsFromValue(a), nil diff --git a/libs/dyn/merge/merge_test.go b/libs/dyn/merge/merge_test.go index 4a4bf9e6c..bfe772016 100644 --- a/libs/dyn/merge/merge_test.go +++ b/libs/dyn/merge/merge_test.go @@ -75,7 +75,6 @@ func TestMergeMaps(t *testing.T) { assert.Equal(t, l1, out.Get("foo").Location()) assert.Equal(t, l2, out.Get("qux").Location()) } - } func TestMergeMapsNil(t *testing.T) { diff --git a/libs/dyn/merge/override.go b/libs/dyn/merge/override.go index 7a8667cd6..1e49d5544 100644 --- a/libs/dyn/merge/override.go +++ b/libs/dyn/merge/override.go @@ -23,7 +23,7 @@ import ( type OverrideVisitor struct { VisitDelete func(valuePath dyn.Path, left dyn.Value) error VisitInsert func(valuePath dyn.Path, right dyn.Value) (dyn.Value, error) - VisitUpdate func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) + VisitUpdate func(valuePath dyn.Path, left, right dyn.Value) (dyn.Value, error) } var ErrOverrideUndoDelete = errors.New("undo delete operation") @@ -31,11 +31,11 @@ var ErrOverrideUndoDelete = errors.New("undo delete operation") // Override overrides value 'leftRoot' with 'rightRoot', keeping 'location' if values // haven't changed. Preserving 'location' is important to preserve the original source of the value // for error reporting. 
-func Override(leftRoot dyn.Value, rightRoot dyn.Value, visitor OverrideVisitor) (dyn.Value, error) { +func Override(leftRoot, rightRoot dyn.Value, visitor OverrideVisitor) (dyn.Value, error) { return override(dyn.EmptyPath, leftRoot, rightRoot, visitor) } -func override(basePath dyn.Path, left dyn.Value, right dyn.Value, visitor OverrideVisitor) (dyn.Value, error) { +func override(basePath dyn.Path, left, right dyn.Value, visitor OverrideVisitor) (dyn.Value, error) { if left.Kind() != right.Kind() { return visitor.VisitUpdate(basePath, left, right) } @@ -46,7 +46,6 @@ func override(basePath dyn.Path, left dyn.Value, right dyn.Value, visitor Overri switch left.Kind() { case dyn.KindMap: merged, err := overrideMapping(basePath, left.MustMap(), right.MustMap(), visitor) - if err != nil { return dyn.InvalidValue, err } @@ -57,7 +56,6 @@ func override(basePath dyn.Path, left dyn.Value, right dyn.Value, visitor Overri // some sequences are keyed, and we can detect which elements are added/removed/updated, // but we don't have this information merged, err := overrideSequence(basePath, left.MustSequence(), right.MustSequence(), visitor) - if err != nil { return dyn.InvalidValue, err } @@ -107,7 +105,7 @@ func override(basePath dyn.Path, left dyn.Value, right dyn.Value, visitor Overri return dyn.InvalidValue, fmt.Errorf("unexpected kind %s at %s", left.Kind(), basePath.String()) } -func overrideMapping(basePath dyn.Path, leftMapping dyn.Mapping, rightMapping dyn.Mapping, visitor OverrideVisitor) (dyn.Mapping, error) { +func overrideMapping(basePath dyn.Path, leftMapping, rightMapping dyn.Mapping, visitor OverrideVisitor) (dyn.Mapping, error) { out := dyn.NewMapping() for _, leftPair := range leftMapping.Pairs() { @@ -136,14 +134,12 @@ func overrideMapping(basePath dyn.Path, leftMapping dyn.Mapping, rightMapping dy if leftPair, ok := leftMapping.GetPair(rightPair.Key); ok { path := basePath.Append(dyn.Key(rightPair.Key.MustString())) newValue, err := override(path, leftPair.Value, rightPair.Value, visitor) - if err != nil { return dyn.NewMapping(), err } // key was there before, so keep its location err = out.Set(leftPair.Key, newValue) - if err != nil { return dyn.NewMapping(), err } @@ -151,13 +147,11 @@ func overrideMapping(basePath dyn.Path, leftMapping dyn.Mapping, rightMapping dy path := basePath.Append(dyn.Key(rightPair.Key.MustString())) newValue, err := visitor.VisitInsert(path, rightPair.Value) - if err != nil { return dyn.NewMapping(), err } err = out.Set(rightPair.Key, newValue) - if err != nil { return dyn.NewMapping(), err } @@ -167,14 +161,13 @@ func overrideMapping(basePath dyn.Path, leftMapping dyn.Mapping, rightMapping dy return out, nil } -func overrideSequence(basePath dyn.Path, left []dyn.Value, right []dyn.Value, visitor OverrideVisitor) ([]dyn.Value, error) { +func overrideSequence(basePath dyn.Path, left, right []dyn.Value, visitor OverrideVisitor) ([]dyn.Value, error) { minLen := min(len(left), len(right)) var values []dyn.Value - for i := 0; i < minLen; i++ { + for i := range minLen { path := basePath.Append(dyn.Index(i)) merged, err := override(path, left[i], right[i], visitor) - if err != nil { return nil, err } @@ -186,7 +179,6 @@ func overrideSequence(basePath dyn.Path, left []dyn.Value, right []dyn.Value, vi for i := minLen; i < len(right); i++ { path := basePath.Append(dyn.Index(i)) newValue, err := visitor.VisitInsert(path, right[i]) - if err != nil { return nil, err } diff --git a/libs/dyn/merge/override_test.go b/libs/dyn/merge/override_test.go index 
264c32e5e..d9d3f3983 100644 --- a/libs/dyn/merge/override_test.go +++ b/libs/dyn/merge/override_test.go @@ -1,7 +1,7 @@ package merge import ( - "fmt" + "errors" "testing" "time" @@ -373,7 +373,7 @@ func TestOverride_Primitive(t *testing.T) { if modified { t.Run(tc.name+" - visitor has error", func(t *testing.T) { - _, visitor := createVisitor(visitorOpts{error: fmt.Errorf("unexpected change in test")}) + _, visitor := createVisitor(visitorOpts{error: errors.New("unexpected change in test")}) _, err := override(dyn.EmptyPath, tc.left, tc.right, visitor) assert.EqualError(t, err, "unexpected change in test") @@ -432,10 +432,12 @@ func TestOverride_PreserveMappingKeys(t *testing.T) { rightValueLocation := dyn.Location{File: "right.yml", Line: 3, Column: 1} left := dyn.NewMapping() - left.Set(dyn.NewValue("a", []dyn.Location{leftKeyLocation}), dyn.NewValue(42, []dyn.Location{leftValueLocation})) + err := left.Set(dyn.NewValue("a", []dyn.Location{leftKeyLocation}), dyn.NewValue(42, []dyn.Location{leftValueLocation})) + require.NoError(t, err) right := dyn.NewMapping() - right.Set(dyn.NewValue("a", []dyn.Location{rightKeyLocation}), dyn.NewValue(7, []dyn.Location{rightValueLocation})) + err = right.Set(dyn.NewValue("a", []dyn.Location{rightKeyLocation}), dyn.NewValue(7, []dyn.Location{rightValueLocation})) + require.NoError(t, err) state, visitor := createVisitor(visitorOpts{}) @@ -482,7 +484,7 @@ func createVisitor(opts visitorOpts) (*visitorState, OverrideVisitor) { s := visitorState{} return &s, OverrideVisitor{ - VisitUpdate: func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) { + VisitUpdate: func(valuePath dyn.Path, left, right dyn.Value) (dyn.Value, error) { s.updated = append(s.updated, valuePath.String()) if opts.error != nil { diff --git a/libs/dyn/path.go b/libs/dyn/path.go index 76377e2dc..25bff5070 100644 --- a/libs/dyn/path.go +++ b/libs/dyn/path.go @@ -65,7 +65,7 @@ func (p Path) Equal(q Path) bool { if pl != ql { return false } - for i := 0; i < pl; i++ { + for i := range pl { if p[i] != q[i] { return false } @@ -81,7 +81,7 @@ func (p Path) HasPrefix(q Path) bool { if pl < ql { return false } - for i := 0; i < ql; i++ { + for i := range ql { if p[i] != q[i] { return false } diff --git a/libs/dyn/path_string_test.go b/libs/dyn/path_string_test.go index 0d64bf110..eb1816d7d 100644 --- a/libs/dyn/path_string_test.go +++ b/libs/dyn/path_string_test.go @@ -1,7 +1,7 @@ package dyn_test import ( - "fmt" + "errors" "testing" . "github.com/databricks/cli/libs/dyn" @@ -52,31 +52,31 @@ func TestNewPathFromString(t *testing.T) { }, { input: "foo[123", - err: fmt.Errorf("invalid path: foo[123"), + err: errors.New("invalid path: foo[123"), }, { input: "foo[123]]", - err: fmt.Errorf("invalid path: foo[123]]"), + err: errors.New("invalid path: foo[123]]"), }, { input: "foo[[123]", - err: fmt.Errorf("invalid path: foo[[123]"), + err: errors.New("invalid path: foo[[123]"), }, { input: "foo[[123]]", - err: fmt.Errorf("invalid path: foo[[123]]"), + err: errors.New("invalid path: foo[[123]]"), }, { input: "foo[foo]", - err: fmt.Errorf("invalid path: foo[foo]"), + err: errors.New("invalid path: foo[foo]"), }, { input: "foo..bar", - err: fmt.Errorf("invalid path: foo..bar"), + err: errors.New("invalid path: foo..bar"), }, { input: "foo.bar.", - err: fmt.Errorf("invalid path: foo.bar."), + err: errors.New("invalid path: foo.bar."), }, { // Every component may have a leading dot. 
@@ -86,7 +86,7 @@ func TestNewPathFromString(t *testing.T) { { // But after an index there must be a dot. input: "foo[1]bar", - err: fmt.Errorf("invalid path: foo[1]bar"), + err: errors.New("invalid path: foo[1]bar"), }, } { p, err := NewPathFromString(tc.input) diff --git a/libs/dyn/pattern.go b/libs/dyn/pattern.go index aecdc3ca6..2d2e9cae7 100644 --- a/libs/dyn/pattern.go +++ b/libs/dyn/pattern.go @@ -69,7 +69,7 @@ func (c anyKeyComponent) visit(v Value, prefix Path, suffix Pattern, opts visitO return InvalidValue, err } - m.Set(pk, nv) + m.Set(pk, nv) //nolint:errcheck } return NewValue(m, v.Locations()), nil diff --git a/libs/dyn/value_test.go b/libs/dyn/value_test.go index 6a0a27b8d..86e65858e 100644 --- a/libs/dyn/value_test.go +++ b/libs/dyn/value_test.go @@ -25,11 +25,11 @@ func TestValueAsMap(t *testing.T) { _, ok := zeroValue.AsMap() assert.False(t, ok) - var intValue = dyn.V(1) + intValue := dyn.V(1) _, ok = intValue.AsMap() assert.False(t, ok) - var mapValue = dyn.NewValue( + mapValue := dyn.NewValue( map[string]dyn.Value{ "key": dyn.NewValue( "value", @@ -46,6 +46,6 @@ func TestValueAsMap(t *testing.T) { func TestValueIsValid(t *testing.T) { var zeroValue dyn.Value assert.False(t, zeroValue.IsValid()) - var intValue = dyn.V(1) + intValue := dyn.V(1) assert.True(t, intValue.IsValid()) } diff --git a/libs/dyn/visit.go b/libs/dyn/visit.go index 38adec24f..95515115e 100644 --- a/libs/dyn/visit.go +++ b/libs/dyn/visit.go @@ -122,7 +122,7 @@ func (component pathComponent) visit(v Value, prefix Path, suffix Pattern, opts // Return an updated map value. m = m.Clone() - m.Set(V(component.key), nv) + m.Set(V(component.key), nv) //nolint:errcheck return Value{ v: m, k: KindMap, diff --git a/libs/dyn/visit_map.go b/libs/dyn/visit_map.go index 3f0cded03..db4526038 100644 --- a/libs/dyn/visit_map.go +++ b/libs/dyn/visit_map.go @@ -25,7 +25,7 @@ func Foreach(fn MapFunc) MapFunc { if err != nil { return InvalidValue, err } - m.Set(pk, nv) + m.Set(pk, nv) //nolint:errcheck } return NewValue(m, v.Locations()), nil case KindSequence: diff --git a/libs/dyn/visit_map_test.go b/libs/dyn/visit_map_test.go index 2cea0913b..ad091743d 100644 --- a/libs/dyn/visit_map_test.go +++ b/libs/dyn/visit_map_test.go @@ -1,6 +1,7 @@ package dyn_test import ( + "errors" "fmt" "testing" @@ -71,7 +72,7 @@ func TestMapFuncOnMap(t *testing.T) { }, vbar.AsAny()) // Return error from map function. - var ref = fmt.Errorf("error") + ref := errors.New("error") verr, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("foo")), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref }) @@ -87,12 +88,12 @@ func TestMapFuncOnMapWithEmptySequence(t *testing.T) { dyn.V([]dyn.Value{dyn.V(42)}), } - for i := 0; i < len(variants); i++ { + for i := range variants { vin := dyn.V(map[string]dyn.Value{ "key": variants[i], }) - for j := 0; j < len(variants); j++ { + for j := range variants { vout, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("key")), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return variants[j], nil }) @@ -137,7 +138,7 @@ func TestMapFuncOnSequence(t *testing.T) { assert.Equal(t, []any{42, 45}, v1.AsAny()) // Return error from map function. 
- var ref = fmt.Errorf("error") + ref := errors.New("error") verr, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(0)), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref }) @@ -153,12 +154,12 @@ func TestMapFuncOnSequenceWithEmptySequence(t *testing.T) { dyn.V([]dyn.Value{dyn.V(42)}), } - for i := 0; i < len(variants); i++ { + for i := range variants { vin := dyn.V([]dyn.Value{ variants[i], }) - for j := 0; j < len(variants); j++ { + for j := range variants { vout, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(0)), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return variants[j], nil }) @@ -211,7 +212,7 @@ func TestMapForeachOnMapError(t *testing.T) { }) // Check that an error from the map function propagates. - var ref = fmt.Errorf("error") + ref := errors.New("error") _, err := dyn.Map(vin, ".", dyn.Foreach(func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref })) @@ -255,7 +256,7 @@ func TestMapForeachOnSequenceError(t *testing.T) { }) // Check that an error from the map function propagates. - var ref = fmt.Errorf("error") + ref := errors.New("error") _, err := dyn.Map(vin, ".", dyn.Foreach(func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref })) diff --git a/libs/dyn/visit_set.go b/libs/dyn/visit_set.go index b086fb8a9..9991d311f 100644 --- a/libs/dyn/visit_set.go +++ b/libs/dyn/visit_set.go @@ -41,7 +41,7 @@ func SetByPath(v Value, p Path, nv Value) (Value, error) { // Return an updated map value. m = m.Clone() - m.Set(V(component.key), nv) + m.Set(V(component.key), nv) //nolint:errcheck return Value{ v: m, k: KindMap, diff --git a/libs/dyn/walk.go b/libs/dyn/walk.go index c51a11e22..b3576e088 100644 --- a/libs/dyn/walk.go +++ b/libs/dyn/walk.go @@ -45,7 +45,7 @@ func walk(v Value, p Path, fn func(p Path, v Value) (Value, error)) (Value, erro if err != nil { return InvalidValue, err } - out.Set(pk, nv) + out.Set(pk, nv) //nolint:errcheck } v.v = out case KindSequence: diff --git a/libs/dyn/yamlloader/loader.go b/libs/dyn/yamlloader/loader.go index a77ee0744..fe58d6dfb 100644 --- a/libs/dyn/yamlloader/loader.go +++ b/libs/dyn/yamlloader/loader.go @@ -129,7 +129,7 @@ func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, erro return dyn.InvalidValue, err } - acc.Set(k, v) + acc.Set(k, v) //nolint:errcheck } if merge == nil { @@ -137,8 +137,8 @@ func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, erro } // Build location for the merge node. - var mloc = d.location(merge) - var merr = errorf(mloc, "map merge requires map or sequence of maps as the value") + mloc := d.location(merge) + merr := errorf(mloc, "map merge requires map or sequence of maps as the value") // Flatten the merge node into a slice of nodes. // It can be either a single node or a sequence of nodes. 
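Many of the loop rewrites in the `libs/dyn` hunks above rely on Go 1.22's range-over-int form. A small illustration of the two variants the diff uses (the function and names are illustrative, and a Go 1.22+ toolchain is assumed):

```go
package sketch

import "fmt"

// process mirrors the loop rewrites: range-over-int replaces the classic
// three-clause form, and the index variable is dropped when it is unused.
func process(total int) {
	for i := range total { // was: for i := 0; i < total; i++
		fmt.Println("index", i)
	}
	for range total { // index not needed at all
		fmt.Println("tick")
	}
}
```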
diff --git a/libs/dyn/yamlloader/yaml_spec_test.go b/libs/dyn/yamlloader/yaml_spec_test.go index 2a5ae817f..d9997f702 100644 --- a/libs/dyn/yamlloader/yaml_spec_test.go +++ b/libs/dyn/yamlloader/yaml_spec_test.go @@ -777,7 +777,8 @@ func TestYAMLSpecExample_2_27(t *testing.T) { ), }, []dyn.Location{{File: file, Line: 22, Column: 3}}, - )}, + ), + }, []dyn.Location{{File: file, Line: 18, Column: 1}}, ), "tax": dyn.NewValue( diff --git a/libs/dyn/yamlsaver/saver.go b/libs/dyn/yamlsaver/saver.go index 0fd81d534..a7838ff36 100644 --- a/libs/dyn/yamlsaver/saver.go +++ b/libs/dyn/yamlsaver/saver.go @@ -27,7 +27,7 @@ func NewSaverWithStyle(nodesWithStyle map[string]yaml.Style) *saver { } func (s *saver) SaveAsYAML(data any, filename string, force bool) error { - err := os.MkdirAll(filepath.Dir(filename), 0755) + err := os.MkdirAll(filepath.Dir(filename), 0o755) if err != nil { return err } @@ -123,9 +123,9 @@ func (s *saver) toYamlNodeWithStyle(v dyn.Value, style yaml.Style) (*yaml.Node, } return &yaml.Node{Kind: yaml.ScalarNode, Value: v.MustString(), Style: style}, nil case dyn.KindBool: - return &yaml.Node{Kind: yaml.ScalarNode, Value: fmt.Sprint(v.MustBool()), Style: style}, nil + return &yaml.Node{Kind: yaml.ScalarNode, Value: strconv.FormatBool(v.MustBool()), Style: style}, nil case dyn.KindInt: - return &yaml.Node{Kind: yaml.ScalarNode, Value: fmt.Sprint(v.MustInt()), Style: style}, nil + return &yaml.Node{Kind: yaml.ScalarNode, Value: strconv.FormatInt(v.MustInt(), 10), Style: style}, nil case dyn.KindFloat: return &yaml.Node{Kind: yaml.ScalarNode, Value: fmt.Sprint(v.MustFloat()), Style: style}, nil case dyn.KindTime: diff --git a/libs/dyn/yamlsaver/saver_test.go b/libs/dyn/yamlsaver/saver_test.go index aa481c20b..89bd5c31e 100644 --- a/libs/dyn/yamlsaver/saver_test.go +++ b/libs/dyn/yamlsaver/saver_test.go @@ -11,7 +11,7 @@ import ( func TestMarshalNilValue(t *testing.T) { s := NewSaver() - var nilValue = dyn.NilValue + nilValue := dyn.NilValue v, err := s.toYamlNode(nilValue) assert.NoError(t, err) assert.Equal(t, "null", v.Value) @@ -19,7 +19,7 @@ func TestMarshalNilValue(t *testing.T) { func TestMarshalIntValue(t *testing.T) { s := NewSaver() - var intValue = dyn.V(1) + intValue := dyn.V(1) v, err := s.toYamlNode(intValue) assert.NoError(t, err) assert.Equal(t, "1", v.Value) @@ -28,7 +28,7 @@ func TestMarshalIntValue(t *testing.T) { func TestMarshalFloatValue(t *testing.T) { s := NewSaver() - var floatValue = dyn.V(1.0) + floatValue := dyn.V(1.0) v, err := s.toYamlNode(floatValue) assert.NoError(t, err) assert.Equal(t, "1", v.Value) @@ -37,7 +37,7 @@ func TestMarshalFloatValue(t *testing.T) { func TestMarshalBoolValue(t *testing.T) { s := NewSaver() - var boolValue = dyn.V(true) + boolValue := dyn.V(true) v, err := s.toYamlNode(boolValue) assert.NoError(t, err) assert.Equal(t, "true", v.Value) @@ -49,7 +49,7 @@ func TestMarshalTimeValue(t *testing.T) { require.NoError(t, err) s := NewSaver() - var timeValue = dyn.V(tm) + timeValue := dyn.V(tm) v, err := s.toYamlNode(timeValue) assert.NoError(t, err) assert.Equal(t, "1970-01-01", v.Value) @@ -58,7 +58,7 @@ func TestMarshalTimeValue(t *testing.T) { func TestMarshalSequenceValue(t *testing.T) { s := NewSaver() - var sequenceValue = dyn.NewValue( + sequenceValue := dyn.NewValue( []dyn.Value{ dyn.NewValue("value1", []dyn.Location{{File: "file", Line: 1, Column: 2}}), dyn.NewValue("value2", []dyn.Location{{File: "file", Line: 2, Column: 2}}), @@ -74,7 +74,7 @@ func TestMarshalSequenceValue(t *testing.T) { func TestMarshalStringValue(t 
*testing.T) { s := NewSaver() - var stringValue = dyn.V("value") + stringValue := dyn.V("value") v, err := s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "value", v.Value) @@ -83,7 +83,7 @@ func TestMarshalStringValue(t *testing.T) { func TestMarshalMapValue(t *testing.T) { s := NewSaver() - var mapValue = dyn.NewValue( + mapValue := dyn.NewValue( map[string]dyn.Value{ "key3": dyn.NewValue("value3", []dyn.Location{{File: "file", Line: 3, Column: 2}}), "key2": dyn.NewValue("value2", []dyn.Location{{File: "file", Line: 2, Column: 2}}), @@ -107,7 +107,7 @@ func TestMarshalMapValue(t *testing.T) { func TestMarshalNestedValues(t *testing.T) { s := NewSaver() - var mapValue = dyn.NewValue( + mapValue := dyn.NewValue( map[string]dyn.Value{ "key1": dyn.NewValue( map[string]dyn.Value{ @@ -129,14 +129,14 @@ func TestMarshalNestedValues(t *testing.T) { func TestMarshalHexadecimalValueIsQuoted(t *testing.T) { s := NewSaver() - var hexValue = dyn.V(0x123) + hexValue := dyn.V(0x123) v, err := s.toYamlNode(hexValue) assert.NoError(t, err) assert.Equal(t, "291", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.V("0x123") + stringValue := dyn.V("0x123") v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "0x123", v.Value) @@ -146,14 +146,14 @@ func TestMarshalHexadecimalValueIsQuoted(t *testing.T) { func TestMarshalBinaryValueIsQuoted(t *testing.T) { s := NewSaver() - var binaryValue = dyn.V(0b101) + binaryValue := dyn.V(0b101) v, err := s.toYamlNode(binaryValue) assert.NoError(t, err) assert.Equal(t, "5", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.V("0b101") + stringValue := dyn.V("0b101") v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "0b101", v.Value) @@ -163,14 +163,14 @@ func TestMarshalBinaryValueIsQuoted(t *testing.T) { func TestMarshalOctalValueIsQuoted(t *testing.T) { s := NewSaver() - var octalValue = dyn.V(0123) + octalValue := dyn.V(0o123) v, err := s.toYamlNode(octalValue) assert.NoError(t, err) assert.Equal(t, "83", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.V("0123") + stringValue := dyn.V("0123") v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "0123", v.Value) @@ -180,14 +180,14 @@ func TestMarshalOctalValueIsQuoted(t *testing.T) { func TestMarshalFloatValueIsQuoted(t *testing.T) { s := NewSaver() - var floatValue = dyn.V(1.0) + floatValue := dyn.V(1.0) v, err := s.toYamlNode(floatValue) assert.NoError(t, err) assert.Equal(t, "1", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.V("1.0") + stringValue := dyn.V("1.0") v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "1.0", v.Value) @@ -197,14 +197,14 @@ func TestMarshalFloatValueIsQuoted(t *testing.T) { func TestMarshalBoolValueIsQuoted(t *testing.T) { s := NewSaver() - var boolValue = dyn.V(true) + boolValue := dyn.V(true) v, err := s.toYamlNode(boolValue) assert.NoError(t, err) assert.Equal(t, "true", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.V("true") + stringValue := dyn.V("true") v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "true", v.Value) @@ -217,7 +217,7 @@ func TestCustomStylingWithNestedMap(t *testing.T) { "styled": yaml.DoubleQuotedStyle, }) - var 
styledMap = dyn.NewValue( + styledMap := dyn.NewValue( map[string]dyn.Value{ "key1": dyn.NewValue("value1", []dyn.Location{{File: "file", Line: 1, Column: 2}}), "key2": dyn.NewValue("value2", []dyn.Location{{File: "file", Line: 2, Column: 2}}), @@ -225,7 +225,7 @@ func TestCustomStylingWithNestedMap(t *testing.T) { []dyn.Location{{File: "file", Line: -2, Column: 2}}, ) - var unstyledMap = dyn.NewValue( + unstyledMap := dyn.NewValue( map[string]dyn.Value{ "key3": dyn.NewValue("value3", []dyn.Location{{File: "file", Line: 1, Column: 2}}), "key4": dyn.NewValue("value4", []dyn.Location{{File: "file", Line: 2, Column: 2}}), @@ -233,7 +233,7 @@ func TestCustomStylingWithNestedMap(t *testing.T) { []dyn.Location{{File: "file", Line: -1, Column: 2}}, ) - var val = dyn.NewValue( + val := dyn.NewValue( map[string]dyn.Value{ "styled": styledMap, "unstyled": unstyledMap, diff --git a/libs/env/context.go b/libs/env/context.go index af4d1afa0..37b76147a 100644 --- a/libs/env/context.go +++ b/libs/env/context.go @@ -65,7 +65,7 @@ func Set(ctx context.Context, key, value string) context.Context { return setMap(ctx, m) } -func homeEnvVar() string { +func HomeEnvVar() string { if runtime.GOOS == "windows" { return "USERPROFILE" } @@ -73,14 +73,14 @@ func homeEnvVar() string { } func WithUserHomeDir(ctx context.Context, value string) context.Context { - return Set(ctx, homeEnvVar(), value) + return Set(ctx, HomeEnvVar(), value) } // ErrNoHomeEnv indicates the absence of $HOME env variable var ErrNoHomeEnv = errors.New("$HOME is not set") func UserHomeDir(ctx context.Context) (string, error) { - home := Get(ctx, homeEnvVar()) + home := Get(ctx, HomeEnvVar()) if home == "" { return "", ErrNoHomeEnv } diff --git a/libs/env/loader.go b/libs/env/loader.go index f441ffa15..74c54cee8 100644 --- a/libs/env/loader.go +++ b/libs/env/loader.go @@ -43,7 +43,9 @@ func (le *configLoader) Configure(cfg *config.Config) error { if v == "" { continue } - a.Set(cfg, v) + if err := a.Set(cfg, v); err != nil { + return err + } } } return nil diff --git a/libs/errs/aggregate_test.go b/libs/errs/aggregate_test.go index a307e956f..216276a06 100644 --- a/libs/errs/aggregate_test.go +++ b/libs/errs/aggregate_test.go @@ -2,36 +2,35 @@ package errs import ( "errors" - "fmt" "testing" "github.com/stretchr/testify/assert" ) func TestFromManyErrors(t *testing.T) { - e1 := fmt.Errorf("Error 1") - e2 := fmt.Errorf("Error 2") - e3 := fmt.Errorf("Error 3") + e1 := errors.New("Error 1") + e2 := errors.New("Error 2") + e3 := errors.New("Error 3") err := FromMany(e1, e2, e3) - assert.True(t, errors.Is(err, e1)) - assert.True(t, errors.Is(err, e2)) - assert.True(t, errors.Is(err, e3)) + assert.ErrorIs(t, err, e1) + assert.ErrorIs(t, err, e2) + assert.ErrorIs(t, err, e3) - assert.Equal(t, err.Error(), `Error 1 + assert.Equal(t, `Error 1 Error 2 -Error 3`) +Error 3`, err.Error()) } func TestFromManyErrorsWihtNil(t *testing.T) { - e1 := fmt.Errorf("Error 1") + e1 := errors.New("Error 1") var e2 error = nil - e3 := fmt.Errorf("Error 3") + e3 := errors.New("Error 3") err := FromMany(e1, e2, e3) - assert.True(t, errors.Is(err, e1)) - assert.True(t, errors.Is(err, e3)) + assert.ErrorIs(t, err, e1) + assert.ErrorIs(t, err, e3) - assert.Equal(t, err.Error(), `Error 1 -Error 3`) + assert.Equal(t, `Error 1 +Error 3`, err.Error()) } diff --git a/libs/exec/exec.go b/libs/exec/exec.go index 8e4633271..466117e60 100644 --- a/libs/exec/exec.go +++ b/libs/exec/exec.go @@ -10,9 +10,11 @@ import ( type ExecutableType string -const BashExecutable ExecutableType = 
`bash` -const ShExecutable ExecutableType = `sh` -const CmdExecutable ExecutableType = `cmd` +const ( + BashExecutable ExecutableType = `bash` + ShExecutable ExecutableType = `sh` + CmdExecutable ExecutableType = `cmd` +) var finders map[ExecutableType](func() (shell, error)) = map[ExecutableType](func() (shell, error)){ BashExecutable: newBashShell, diff --git a/libs/exec/exec_test.go b/libs/exec/exec_test.go index ad54601d0..c363c1f7c 100644 --- a/libs/exec/exec_test.go +++ b/libs/exec/exec_test.go @@ -12,6 +12,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestExecutorWithSimpleInput(t *testing.T) { @@ -86,9 +87,11 @@ func testExecutorWithShell(t *testing.T, shell string) { tmpDir := t.TempDir() t.Setenv("PATH", tmpDir) if runtime.GOOS == "windows" { - os.Symlink(p, fmt.Sprintf("%s/%s.exe", tmpDir, shell)) + err = os.Symlink(p, fmt.Sprintf("%s/%s.exe", tmpDir, shell)) + require.NoError(t, err) } else { - os.Symlink(p, fmt.Sprintf("%s/%s", tmpDir, shell)) + err = os.Symlink(p, fmt.Sprintf("%s/%s", tmpDir, shell)) + require.NoError(t, err) } executor, err := NewCommandExecutor(".") @@ -138,7 +141,7 @@ func TestMultipleCommandsRunInParrallel(t *testing.T) { const count = 5 var wg sync.WaitGroup - for i := 0; i < count; i++ { + for i := range count { wg.Add(1) cmd, err := executor.StartCommand(context.Background(), fmt.Sprintf("echo 'Hello %d'", i)) go func(cmd Command, i int) { diff --git a/libs/exec/shell.go b/libs/exec/shell.go index f5d176896..ee29eac8a 100644 --- a/libs/exec/shell.go +++ b/libs/exec/shell.go @@ -36,7 +36,7 @@ func findShell() (shell, error) { return nil, errors.New("no shell found") } -func createTempScript(command string, extension string) (string, error) { +func createTempScript(command, extension string) (string, error) { file, err := os.CreateTemp(os.TempDir(), "cli-exec*"+extension) if err != nil { return "", err diff --git a/libs/exec/shell_cmd.go b/libs/exec/shell_cmd.go index 164d09739..057ed06a4 100644 --- a/libs/exec/shell_cmd.go +++ b/libs/exec/shell_cmd.go @@ -2,7 +2,6 @@ package exec import ( "errors" - "fmt" osexec "os/exec" ) @@ -18,7 +17,7 @@ func (s cmdShell) prepare(command string) (*execContext, error) { return &execContext{ executable: s.executable, - args: []string{"/D", "/E:ON", "/V:OFF", "/S", "/C", fmt.Sprintf(`CALL %s`, filename)}, + args: []string{"/D", "/E:ON", "/V:OFF", "/S", "/C", "CALL " + filename}, scriptFile: filename, }, nil } diff --git a/libs/fakefs/fakefs.go b/libs/fakefs/fakefs.go index a8d5eb873..050ee2d6e 100644 --- a/libs/fakefs/fakefs.go +++ b/libs/fakefs/fakefs.go @@ -1,12 +1,12 @@ package fakefs import ( - "fmt" + "errors" "io/fs" "time" ) -var ErrNotImplemented = fmt.Errorf("not implemented") +var ErrNotImplemented = errors.New("not implemented") // DirEntry is a fake implementation of [fs.DirEntry]. 
type DirEntry struct { diff --git a/libs/filer/completer/completer_test.go b/libs/filer/completer/completer_test.go index d284447b9..865d34c2f 100644 --- a/libs/filer/completer/completer_test.go +++ b/libs/filer/completer/completer_test.go @@ -37,7 +37,7 @@ func TestFilerCompleterSetsPrefix(t *testing.T) { assert.Equal(t, []string{"dbfs:/dir/dirA/", "dbfs:/dir/dirB/"}, completions) assert.Equal(t, cobra.ShellCompDirectiveNoSpace, directive) - assert.Nil(t, err) + assert.NoError(t, err) } func TestFilerCompleterReturnsNestedDirs(t *testing.T) { @@ -46,7 +46,7 @@ func TestFilerCompleterReturnsNestedDirs(t *testing.T) { assert.Equal(t, []string{"dir/dirA/", "dir/dirB/"}, completions) assert.Equal(t, cobra.ShellCompDirectiveNoSpace, directive) - assert.Nil(t, err) + assert.NoError(t, err) } func TestFilerCompleterReturnsAdjacentDirs(t *testing.T) { @@ -55,7 +55,7 @@ func TestFilerCompleterReturnsAdjacentDirs(t *testing.T) { assert.Equal(t, []string{"dir/dirA/", "dir/dirB/"}, completions) assert.Equal(t, cobra.ShellCompDirectiveNoSpace, directive) - assert.Nil(t, err) + assert.NoError(t, err) } func TestFilerCompleterReturnsNestedDirsAndFiles(t *testing.T) { @@ -64,7 +64,7 @@ func TestFilerCompleterReturnsNestedDirsAndFiles(t *testing.T) { assert.Equal(t, []string{"dir/dirA/", "dir/dirB/", "dir/fileA"}, completions) assert.Equal(t, cobra.ShellCompDirectiveNoSpace, directive) - assert.Nil(t, err) + assert.NoError(t, err) } func TestFilerCompleterAddsDbfsPath(t *testing.T) { @@ -78,7 +78,7 @@ func TestFilerCompleterAddsDbfsPath(t *testing.T) { assert.Equal(t, []string{"dir/dirA/", "dir/dirB/", "dbfs:/"}, completions) assert.Equal(t, cobra.ShellCompDirectiveNoSpace, directive) - assert.Nil(t, err) + assert.NoError(t, err) } func TestFilerCompleterWindowsSeparator(t *testing.T) { @@ -92,7 +92,7 @@ func TestFilerCompleterWindowsSeparator(t *testing.T) { assert.Equal(t, []string{"dir\\dirA\\", "dir\\dirB\\", "dbfs:/"}, completions) assert.Equal(t, cobra.ShellCompDirectiveNoSpace, directive) - assert.Nil(t, err) + assert.NoError(t, err) } func TestFilerCompleterNoCompletions(t *testing.T) { diff --git a/libs/filer/fake_filer.go b/libs/filer/fake_filer.go index 76b8bcd94..1e1cbd985 100644 --- a/libs/filer/fake_filer.go +++ b/libs/filer/fake_filer.go @@ -2,7 +2,7 @@ package filer import ( "context" - "fmt" + "errors" "io" "io/fs" "path" @@ -17,7 +17,7 @@ type FakeFiler struct { } func (f *FakeFiler) Write(ctx context.Context, p string, reader io.Reader, mode ...WriteMode) error { - return fmt.Errorf("not implemented") + return errors.New("not implemented") } func (f *FakeFiler) Read(ctx context.Context, p string) (io.ReadCloser, error) { @@ -30,7 +30,7 @@ func (f *FakeFiler) Read(ctx context.Context, p string) (io.ReadCloser, error) { } func (f *FakeFiler) Delete(ctx context.Context, p string, mode ...DeleteMode) error { - return fmt.Errorf("not implemented") + return errors.New("not implemented") } func (f *FakeFiler) ReadDir(ctx context.Context, p string) ([]fs.DirEntry, error) { @@ -59,7 +59,7 @@ func (f *FakeFiler) ReadDir(ctx context.Context, p string) ([]fs.DirEntry, error } func (f *FakeFiler) Mkdir(ctx context.Context, path string) error { - return fmt.Errorf("not implemented") + return errors.New("not implemented") } func (f *FakeFiler) Stat(ctx context.Context, path string) (fs.FileInfo, error) { diff --git a/libs/filer/filer.go b/libs/filer/filer.go index b5be4c3c2..372c82929 100644 --- a/libs/filer/filer.go +++ b/libs/filer/filer.go @@ -2,7 +2,6 @@ package filer import ( "context" - "fmt" 
"io" "io/fs" ) @@ -36,7 +35,7 @@ type FileAlreadyExistsError struct { } func (err FileAlreadyExistsError) Error() string { - return fmt.Sprintf("file already exists: %s", err.path) + return "file already exists: " + err.path } func (err FileAlreadyExistsError) Is(other error) bool { @@ -52,7 +51,7 @@ func (err FileDoesNotExistError) Is(other error) bool { } func (err FileDoesNotExistError) Error() string { - return fmt.Sprintf("file does not exist: %s", err.path) + return "file does not exist: " + err.path } type NoSuchDirectoryError struct { @@ -60,7 +59,7 @@ type NoSuchDirectoryError struct { } func (err NoSuchDirectoryError) Error() string { - return fmt.Sprintf("no such directory: %s", err.path) + return "no such directory: " + err.path } func (err NoSuchDirectoryError) Is(other error) bool { @@ -72,7 +71,7 @@ type NotADirectory struct { } func (err NotADirectory) Error() string { - return fmt.Sprintf("not a directory: %s", err.path) + return "not a directory: " + err.path } func (err NotADirectory) Is(other error) bool { @@ -84,7 +83,7 @@ type NotAFile struct { } func (err NotAFile) Error() string { - return fmt.Sprintf("not a file: %s", err.path) + return "not a file: " + err.path } func (err NotAFile) Is(other error) bool { @@ -96,15 +95,14 @@ type DirectoryNotEmptyError struct { } func (err DirectoryNotEmptyError) Error() string { - return fmt.Sprintf("directory not empty: %s", err.path) + return "directory not empty: " + err.path } func (err DirectoryNotEmptyError) Is(other error) bool { return other == fs.ErrInvalid } -type CannotDeleteRootError struct { -} +type CannotDeleteRootError struct{} func (err CannotDeleteRootError) Error() string { return "unable to delete filer root" @@ -119,7 +117,7 @@ type PermissionError struct { } func (err PermissionError) Error() string { - return fmt.Sprintf("access denied: %s", err.path) + return "access denied: " + err.path } func (err PermissionError) Is(other error) bool { diff --git a/libs/filer/files_client.go b/libs/filer/files_client.go index 7ea1d0f03..98a534684 100644 --- a/libs/filer/files_client.go +++ b/libs/filer/files_client.go @@ -116,10 +116,7 @@ func (w *FilesClient) urlPath(name string) (string, string, error) { } // The user specified part of the path must be escaped. - urlPath := fmt.Sprintf( - "/api/2.0/fs/files/%s", - url.PathEscape(strings.TrimLeft(absPath, "/")), - ) + urlPath := "/api/2.0/fs/files/" + url.PathEscape(strings.TrimLeft(absPath, "/")) return absPath, urlPath, nil } diff --git a/libs/filer/fs_test.go b/libs/filer/fs_test.go index 849cf6f7c..6168af39a 100644 --- a/libs/filer/fs_test.go +++ b/libs/filer/fs_test.go @@ -63,7 +63,7 @@ func TestFsOpenFile(t *testing.T) { assert.Equal(t, "fileA", info.Name()) assert.Equal(t, int64(3), info.Size()) assert.Equal(t, fs.FileMode(0), info.Mode()) - assert.Equal(t, false, info.IsDir()) + assert.False(t, info.IsDir()) // Read until closed. b := make([]byte, 3) @@ -91,7 +91,7 @@ func TestFsOpenDir(t *testing.T) { info, err := fakeFile.Stat() require.NoError(t, err) assert.Equal(t, "root", info.Name()) - assert.Equal(t, true, info.IsDir()) + assert.True(t, info.IsDir()) de, ok := fakeFile.(fs.ReadDirFile) require.True(t, ok) @@ -107,7 +107,7 @@ func TestFsOpenDir(t *testing.T) { de.Close() - for i := 0; i < 3; i++ { + for range 3 { tmp, err = de.ReadDir(1) require.NoError(t, err) entries = append(entries, tmp...) 
diff --git a/libs/filer/local_client.go b/libs/filer/local_client.go index 8b25345fc..385aa6924 100644 --- a/libs/filer/local_client.go +++ b/libs/filer/local_client.go @@ -29,7 +29,7 @@ func (w *LocalClient) Write(ctx context.Context, name string, reader io.Reader, } // Retrieve permission mask from the [WriteMode], if present. - perm := fs.FileMode(0644) + perm := fs.FileMode(0o644) for _, m := range mode { bits := m & writeModePerm if bits != 0 { @@ -47,7 +47,7 @@ func (w *LocalClient) Write(ctx context.Context, name string, reader io.Reader, f, err := os.OpenFile(absPath, flags, perm) if errors.Is(err, fs.ErrNotExist) && slices.Contains(mode, CreateParentDirectories) { // Create parent directories if they don't exist. - err = os.MkdirAll(filepath.Dir(absPath), 0755) + err = os.MkdirAll(filepath.Dir(absPath), 0o755) if err != nil { return err } @@ -73,7 +73,6 @@ func (w *LocalClient) Write(ctx context.Context, name string, reader io.Reader, } return err - } func (w *LocalClient) Read(ctx context.Context, name string) (io.ReadCloser, error) { @@ -159,7 +158,7 @@ func (w *LocalClient) Mkdir(ctx context.Context, name string) error { return err } - return os.MkdirAll(dirPath, 0755) + return os.MkdirAll(dirPath, 0o755) } func (w *LocalClient) Stat(ctx context.Context, name string) (fs.FileInfo, error) { diff --git a/libs/filer/slice_test.go b/libs/filer/slice_test.go index 21d783483..2bdb3f7f5 100644 --- a/libs/filer/slice_test.go +++ b/libs/filer/slice_test.go @@ -12,11 +12,10 @@ func TestSliceWithout(t *testing.T) { assert.Equal(t, []int{2, 3}, sliceWithout([]int{1, 2, 3}, 1)) assert.Equal(t, []int{1, 3}, sliceWithout([]int{1, 2, 3}, 2)) assert.Equal(t, []int{1, 2}, sliceWithout([]int{1, 2, 3}, 3)) - } func TestSliceWithoutReturnsClone(t *testing.T) { - var ints = []int{1, 2, 3} + ints := []int{1, 2, 3} assert.Equal(t, []int{2, 3}, sliceWithout(ints, 1)) assert.Equal(t, []int{1, 2, 3}, ints) } diff --git a/libs/filer/workspace_files_cache_test.go b/libs/filer/workspace_files_cache_test.go index 8983c5982..a73f415c1 100644 --- a/libs/filer/workspace_files_cache_test.go +++ b/libs/filer/workspace_files_cache_test.go @@ -2,7 +2,7 @@ package filer import ( "context" - "fmt" + "errors" "io" "io/fs" "testing" @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" ) -var errNotImplemented = fmt.Errorf("not implemented") +var errNotImplemented = errors.New("not implemented") type cacheTestFiler struct { calls int diff --git a/libs/filer/workspace_files_client.go b/libs/filer/workspace_files_client.go index 9e0a7ce50..8d5148edd 100644 --- a/libs/filer/workspace_files_client.go +++ b/libs/filer/workspace_files_client.go @@ -195,7 +195,7 @@ func (w *WorkspaceFilesClient) Write(ctx context.Context, name string, reader io // This API returns 400 if the file already exists, when the object type is notebook regex := regexp.MustCompile(`Path \((.*)\) already exists.`) - if aerr.StatusCode == http.StatusBadRequest && regex.Match([]byte(aerr.Message)) { + if aerr.StatusCode == http.StatusBadRequest && regex.MatchString(aerr.Message) { // Parse file path from regex capture group matches := regex.FindStringSubmatch(aerr.Message) if len(matches) == 2 { diff --git a/libs/filer/workspace_files_extensions_client.go b/libs/filer/workspace_files_extensions_client.go index 2a6052091..9ee2722e1 100644 --- a/libs/filer/workspace_files_extensions_client.go +++ b/libs/filer/workspace_files_extensions_client.go @@ -52,7 +52,8 @@ func (w *workspaceFilesExtensionsClient) getNotebookStatByNameWithExt(ctx contex 
notebook.ExtensionR, notebook.ExtensionScala, notebook.ExtensionSql, - notebook.ExtensionJupyter}, ext) { + notebook.ExtensionJupyter, + }, ext) { return nil, nil } diff --git a/libs/filer/workspace_files_extensions_client_test.go b/libs/filer/workspace_files_extensions_client_test.go index 10c176b31..10a2bebf0 100644 --- a/libs/filer/workspace_files_extensions_client_test.go +++ b/libs/filer/workspace_files_extensions_client_test.go @@ -17,8 +17,9 @@ type mockApiClient struct { } func (m *mockApiClient) Do(ctx context.Context, method, path string, - headers map[string]string, request any, response any, - visitors ...func(*http.Request) error) error { + headers map[string]string, request, response any, + visitors ...func(*http.Request) error, +) error { args := m.Called(ctx, method, path, headers, request, response, visitors) // Set the http response from a value provided in the mock call. diff --git a/libs/fileset/glob_test.go b/libs/fileset/glob_test.go index 9eb786db9..0224b2547 100644 --- a/libs/fileset/glob_test.go +++ b/libs/fileset/glob_test.go @@ -52,7 +52,7 @@ func TestGlobFileset(t *testing.T) { files, err = g.Files() require.NoError(t, err) - require.Equal(t, len(files), 0) + require.Empty(t, files) } func TestGlobFilesetWithRelativeRoot(t *testing.T) { diff --git a/libs/flags/json_flag_test.go b/libs/flags/json_flag_test.go index 77530086a..4bebf8b68 100644 --- a/libs/flags/json_flag_test.go +++ b/libs/flags/json_flag_test.go @@ -1,7 +1,6 @@ package flags import ( - "fmt" "os" "path" "testing" @@ -57,17 +56,18 @@ func TestJsonFlagFile(t *testing.T) { var request any var fpath string - var payload = []byte(`{"foo": "bar"}`) + payload := []byte(`{"foo": "bar"}`) { f, err := os.Create(path.Join(t.TempDir(), "file")) require.NoError(t, err) - f.Write(payload) + _, err = f.Write(payload) + require.NoError(t, err) f.Close() fpath = f.Name() } - err := body.Set(fmt.Sprintf("@%s", fpath)) + err := body.Set("@" + fpath) require.NoError(t, err) diags := body.Unmarshal(&request) @@ -122,12 +122,12 @@ func TestJsonUnmarshalForRequest(t *testing.T) { assert.Equal(t, "new job", r.NewSettings.Name) assert.Equal(t, 0, r.NewSettings.TimeoutSeconds) assert.Equal(t, 1, r.NewSettings.MaxConcurrentRuns) - assert.Equal(t, 1, len(r.NewSettings.Tasks)) + assert.Len(t, r.NewSettings.Tasks, 1) assert.Equal(t, "new task", r.NewSettings.Tasks[0].TaskKey) assert.Equal(t, 0, r.NewSettings.Tasks[0].TimeoutSeconds) assert.Equal(t, 0, r.NewSettings.Tasks[0].MaxRetries) assert.Equal(t, 0, r.NewSettings.Tasks[0].MinRetryIntervalMillis) - assert.Equal(t, true, r.NewSettings.Tasks[0].RetryOnTimeout) + assert.True(t, r.NewSettings.Tasks[0].RetryOnTimeout) } const incorrectJsonData = `{ @@ -279,8 +279,8 @@ func TestJsonUnmarshalForRequestWithForceSendFields(t *testing.T) { assert.NoError(t, diags.Error()) assert.Empty(t, diags) - assert.Equal(t, false, r.NewSettings.NotificationSettings.NoAlertForSkippedRuns) - assert.Equal(t, false, r.NewSettings.NotificationSettings.NoAlertForCanceledRuns) + assert.False(t, r.NewSettings.NotificationSettings.NoAlertForSkippedRuns) + assert.False(t, r.NewSettings.NotificationSettings.NoAlertForCanceledRuns) assert.NotContains(t, r.NewSettings.NotificationSettings.ForceSendFields, "NoAlertForSkippedRuns") assert.Contains(t, r.NewSettings.NotificationSettings.ForceSendFields, "NoAlertForCanceledRuns") } diff --git a/libs/flags/log_file_flag.go b/libs/flags/log_file_flag.go index 9e60353f0..d2fe51d91 100644 --- a/libs/flags/log_file_flag.go +++ b/libs/flags/log_file_flag.go @@ -48,7 
+48,7 @@ func (f *realLogFile) Writer() io.Writer { } func (f *realLogFile) Open() error { - file, err := os.OpenFile(f.s, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600) + file, err := os.OpenFile(f.s, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o600) if err != nil { return err } diff --git a/libs/flags/output.go b/libs/flags/output.go index 17da144bd..e0c799131 100644 --- a/libs/flags/output.go +++ b/libs/flags/output.go @@ -1,6 +1,7 @@ package flags import ( + "errors" "fmt" "strings" @@ -25,7 +26,7 @@ func (f *Output) Set(s string) error { case `json`, `text`: *f = Output(lower) default: - return fmt.Errorf("accepted arguments are json and text") + return errors.New("accepted arguments are json and text") } return nil } diff --git a/libs/flags/yaml_flag.go b/libs/flags/yaml_flag.go deleted file mode 100644 index 95cc9b4be..000000000 --- a/libs/flags/yaml_flag.go +++ /dev/null @@ -1,42 +0,0 @@ -package flags - -import ( - "fmt" - "os" - - "github.com/ghodss/yaml" -) - -type YamlFlag struct { - raw []byte -} - -func (y *YamlFlag) String() string { - return fmt.Sprintf("YAML (%d bytes)", len(y.raw)) -} - -// TODO: Command.MarkFlagFilename() -func (y *YamlFlag) Set(v string) error { - // Load request from file if it starts with '@' (like curl). - if v[0] != '@' { - y.raw = []byte(v) - return nil - } - buf, err := os.ReadFile(v[1:]) - if err != nil { - return fmt.Errorf("read %s: %w", v, err) - } - y.raw = buf - return nil -} - -func (y *YamlFlag) Unmarshal(v any) error { - if y.raw == nil { - return nil - } - return yaml.Unmarshal(y.raw, v) -} - -func (y *YamlFlag) Type() string { - return "YAML" -} diff --git a/libs/folders/folders.go b/libs/folders/folders.go index c83c711d3..bbabc588c 100644 --- a/libs/folders/folders.go +++ b/libs/folders/folders.go @@ -8,7 +8,11 @@ import ( // FindDirWithLeaf returns the first directory that holds `leaf`, // traversing up to the root of the filesystem, starting at `dir`. -func FindDirWithLeaf(dir string, leaf string) (string, error) { +func FindDirWithLeaf(dir, leaf string) (string, error) { + dir, err := filepath.Abs(dir) + if err != nil { + return "", err + } for { _, err := os.Stat(filepath.Join(dir, leaf)) diff --git a/libs/folders/folders_test.go b/libs/folders/folders_test.go index 17afc4022..d2afc4f2d 100644 --- a/libs/folders/folders_test.go +++ b/libs/folders/folders_test.go @@ -33,6 +33,6 @@ func TestFindDirWithLeaf(t *testing.T) { { out, err := FindDirWithLeaf(root, "this-leaf-doesnt-exist-anywhere") assert.ErrorIs(t, err, os.ErrNotExist) - assert.Equal(t, out, "") + assert.Equal(t, "", out) } } diff --git a/libs/git/config.go b/libs/git/config.go index fafd81bd6..f7ff057e1 100644 --- a/libs/git/config.go +++ b/libs/git/config.go @@ -155,8 +155,8 @@ func globalGitConfig() (*config, error) { // > are missing or unreadable they will be ignored. // // We therefore ignore the error return value for the calls below. 
- config.loadFile(vfs.MustNew(xdgConfigHome), "git/config") - config.loadFile(vfs.MustNew(config.home), ".gitconfig") + _ = config.loadFile(vfs.MustNew(xdgConfigHome), "git/config") + _ = config.loadFile(vfs.MustNew(config.home), ".gitconfig") return config, nil } diff --git a/libs/git/config_test.go b/libs/git/config_test.go index 3e6edf765..73f3431c9 100644 --- a/libs/git/config_test.go +++ b/libs/git/config_test.go @@ -113,7 +113,7 @@ func (h *testCoreExcludesHelper) initialize(t *testing.T) { t.Setenv("XDG_CONFIG_HOME", h.xdgConfigHome) xdgConfigHomeGit := filepath.Join(h.xdgConfigHome, "git") - err := os.MkdirAll(xdgConfigHomeGit, 0755) + err := os.MkdirAll(xdgConfigHomeGit, 0o755) require.NoError(t, err) } @@ -124,7 +124,7 @@ func (h *testCoreExcludesHelper) coreExcludesFile() (string, error) { } func (h *testCoreExcludesHelper) writeConfig(path, contents string) { - err := os.WriteFile(path, []byte(contents), 0644) + err := os.WriteFile(path, []byte(contents), 0o644) require.NoError(h, err) } diff --git a/libs/git/fileset_test.go b/libs/git/fileset_test.go index f4fd931fd..6d239edf5 100644 --- a/libs/git/fileset_test.go +++ b/libs/git/fileset_test.go @@ -56,7 +56,8 @@ func TestFileSetAddsCacheDirToGitIgnore(t *testing.T) { projectDir := t.TempDir() fileSet, err := NewFileSetAtRoot(vfs.MustNew(projectDir)) require.NoError(t, err) - fileSet.EnsureValidGitIgnoreExists() + err = fileSet.EnsureValidGitIgnoreExists() + require.NoError(t, err) gitIgnorePath := filepath.Join(projectDir, ".gitignore") assert.FileExists(t, gitIgnorePath) @@ -74,7 +75,8 @@ func TestFileSetDoesNotCacheDirToGitIgnoreIfAlreadyPresent(t *testing.T) { err = os.WriteFile(gitIgnorePath, []byte(".databricks"), 0o644) require.NoError(t, err) - fileSet.EnsureValidGitIgnoreExists() + err = fileSet.EnsureValidGitIgnoreExists() + require.NoError(t, err) b, err := os.ReadFile(gitIgnorePath) require.NoError(t, err) diff --git a/libs/git/ignore_test.go b/libs/git/ignore_test.go index 057c0cb2e..9e2713608 100644 --- a/libs/git/ignore_test.go +++ b/libs/git/ignore_test.go @@ -48,7 +48,7 @@ func TestIgnoreFileTaint(t *testing.T) { assert.False(t, ign) // Now create the .gitignore file. - err = os.WriteFile(gitIgnorePath, []byte("hello"), 0644) + err = os.WriteFile(gitIgnorePath, []byte("hello"), 0o644) require.NoError(t, err) // Verify that the match still doesn't happen (no spontaneous reload). 
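The test changes above move permission literals from the legacy `0644`/`0755` spelling to the `0o`-prefixed form that gofumpt standardizes on; the values are identical, only the notation changes. A tiny demonstration using a throwaway temp directory:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// 0o644 and 0644 are the same value; the 0o prefix just makes the
	// octal base explicit.
	fmt.Println(0o644 == 0644) // true

	dir, err := os.MkdirTemp("", "perm-demo-*")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	path := filepath.Join(dir, "config")
	if err := os.WriteFile(path, []byte("hello\n"), 0o644); err != nil {
		panic(err)
	}

	info, err := os.Stat(path)
	if err != nil {
		panic(err)
	}
	fmt.Printf("mode: %o\n", info.Mode().Perm())
}
```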
diff --git a/libs/git/info.go b/libs/git/info.go index 13c298113..46e57be48 100644 --- a/libs/git/info.go +++ b/libs/git/info.go @@ -2,15 +2,12 @@ package git import ( "context" - "errors" - "io/fs" "net/http" - "os" "path" - "path/filepath" "strings" "github.com/databricks/cli/libs/dbr" + "github.com/databricks/cli/libs/folders" "github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/vfs" "github.com/databricks/databricks-sdk-go" @@ -75,7 +72,6 @@ func fetchRepositoryInfoAPI(ctx context.Context, path string, w *databricks.Work }, &response, ) - if err != nil { return result, err } @@ -105,7 +101,7 @@ func ensureWorkspacePrefix(p string) string { func fetchRepositoryInfoDotGit(ctx context.Context, path string) (RepositoryInfo, error) { result := RepositoryInfo{} - rootDir, err := findLeafInTree(path, GitDirectoryName) + rootDir, err := folders.FindDirWithLeaf(path, GitDirectoryName) if rootDir == "" { return result, err } @@ -134,28 +130,3 @@ func fetchRepositoryInfoDotGit(ctx context.Context, path string) (RepositoryInfo return result, nil } - -func findLeafInTree(p string, leafName string) (string, error) { - var err error - for i := 0; i < 10000; i++ { - _, err = os.Stat(filepath.Join(p, leafName)) - - if err == nil { - // Found [leafName] in p - return p, nil - } - - // ErrNotExist means we continue traversal up the tree. - if errors.Is(err, fs.ErrNotExist) { - parent := filepath.Dir(p) - if parent == p { - return "", nil - } - p = parent - continue - } - break - } - - return "", err -} diff --git a/libs/git/reference.go b/libs/git/reference.go index 2165a9cda..6001d70de 100644 --- a/libs/git/reference.go +++ b/libs/git/reference.go @@ -12,8 +12,10 @@ import ( type ReferenceType string -var ErrNotAReferencePointer = fmt.Errorf("HEAD does not point to another reference") -var ErrNotABranch = fmt.Errorf("HEAD is not a reference to a git branch") +var ( + ErrNotAReferencePointer = errors.New("HEAD does not point to another reference") + ErrNotABranch = errors.New("HEAD is not a reference to a git branch") +) const ( // pointer to a secondary reference file path containing sha-1 object ID. 
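The info.go hunk above deletes the local `findLeafInTree` helper in favor of the shared `folders.FindDirWithLeaf`, which (per the earlier folders.go change) now resolves the starting directory to an absolute path before walking up. A simplified standalone version of that traversal, not the CLI's implementation:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
)

// findDirWithLeaf walks up from dir until it finds a directory containing
// leaf (for example ".git"), mirroring the traversal discussed above.
func findDirWithLeaf(dir, leaf string) (string, error) {
	dir, err := filepath.Abs(dir)
	if err != nil {
		return "", err
	}
	for {
		_, err := os.Stat(filepath.Join(dir, leaf))
		if err == nil {
			return dir, nil // found leaf in dir
		}
		if !errors.Is(err, fs.ErrNotExist) {
			return "", err // unexpected stat error
		}
		parent := filepath.Dir(dir)
		if parent == dir {
			return "", fs.ErrNotExist // reached the filesystem root
		}
		dir = parent
	}
}

func main() {
	root, err := findDirWithLeaf(".", ".git")
	if err != nil {
		fmt.Println("not inside a repository:", err)
		return
	}
	fmt.Println("repository root:", root)
}
```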
@@ -30,8 +32,10 @@ type Reference struct { Content string } -const ReferencePrefix = "ref: " -const HeadPathPrefix = "refs/heads/" +const ( + ReferencePrefix = "ref: " + HeadPathPrefix = "refs/heads/" +) // asserts if a string is a 40 character hexadecimal encoded string func isSHA1(s string) bool { diff --git a/libs/git/reference_test.go b/libs/git/reference_test.go index 194d79333..bfa0e50e5 100644 --- a/libs/git/reference_test.go +++ b/libs/git/reference_test.go @@ -54,7 +54,8 @@ func TestReferenceLoadingForObjectID(t *testing.T) { f, err := os.Create(filepath.Join(tmp, "HEAD")) require.NoError(t, err) defer f.Close() - f.WriteString(strings.Repeat("e", 40) + "\r\n") + _, err = f.WriteString(strings.Repeat("e", 40) + "\r\n") + require.NoError(t, err) ref, err := LoadReferenceFile(vfs.MustNew(tmp), "HEAD") assert.NoError(t, err) @@ -67,7 +68,8 @@ func TestReferenceLoadingForReference(t *testing.T) { f, err := os.OpenFile(filepath.Join(tmp, "HEAD"), os.O_CREATE|os.O_WRONLY, os.ModePerm) require.NoError(t, err) defer f.Close() - f.WriteString("ref: refs/heads/foo\n") + _, err = f.WriteString("ref: refs/heads/foo\n") + require.NoError(t, err) ref, err := LoadReferenceFile(vfs.MustNew(tmp), "HEAD") assert.NoError(t, err) @@ -80,7 +82,8 @@ func TestReferenceLoadingFailsForInvalidContent(t *testing.T) { f, err := os.OpenFile(filepath.Join(tmp, "HEAD"), os.O_CREATE|os.O_WRONLY, os.ModePerm) require.NoError(t, err) defer f.Close() - f.WriteString("abc") + _, err = f.WriteString("abc") + require.NoError(t, err) _, err = LoadReferenceFile(vfs.MustNew(tmp), "HEAD") assert.ErrorContains(t, err, "unknown format for git HEAD") diff --git a/libs/git/repository_test.go b/libs/git/repository_test.go index 93d9a03dc..58a540190 100644 --- a/libs/git/repository_test.go +++ b/libs/git/repository_test.go @@ -1,7 +1,6 @@ package git import ( - "fmt" "os" "path/filepath" "strings" @@ -27,7 +26,7 @@ func newTestRepository(t *testing.T) *testRepository { require.NoError(t, err) defer f1.Close() - f1.WriteString( + _, err = f1.WriteString( `[core] repositoryformatversion = 0 filemode = true @@ -36,6 +35,7 @@ func newTestRepository(t *testing.T) *testRepository { ignorecase = true precomposeunicode = true `) + require.NoError(t, err) f2, err := os.Create(filepath.Join(tmp, ".git", "HEAD")) require.NoError(t, err) @@ -62,7 +62,7 @@ func (testRepo *testRepository) checkoutCommit(commitId string) { require.NoError(testRepo.t, err) } -func (testRepo *testRepository) addBranch(name string, latestCommit string) { +func (testRepo *testRepository) addBranch(name, latestCommit string) { // create dir for branch head reference branchDir := filepath.Join(testRepo.r.Root(), ".git", "refs", "heads") err := os.MkdirAll(branchDir, os.ModePerm) @@ -95,8 +95,7 @@ func (testRepo *testRepository) addOriginUrl(url string) { defer f.Close() _, err = f.WriteString( - fmt.Sprintf(`[remote "origin"] - url = %s`, url)) + "[remote \"origin\"]\n\turl = " + url) require.NoError(testRepo.t, err) // reload config to reflect the remote url diff --git a/libs/git/view.go b/libs/git/view.go index 2eaba1f8b..db22dfc5d 100644 --- a/libs/git/view.go +++ b/libs/git/view.go @@ -113,7 +113,7 @@ func (v *View) EnsureValidGitIgnoreExists() error { // Create .gitignore with .databricks entry gitIgnorePath := filepath.Join(v.repo.Root(), v.targetPath, ".gitignore") - file, err := os.OpenFile(gitIgnorePath, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644) + file, err := os.OpenFile(gitIgnorePath, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0o644) if err != nil { return 
err } diff --git a/libs/git/view_test.go b/libs/git/view_test.go index 06f6f9419..96881fdee 100644 --- a/libs/git/view_test.go +++ b/libs/git/view_test.go @@ -20,7 +20,7 @@ func copyTestdata(t *testing.T, name string) string { require.NoError(t, err) if d.IsDir() { - err := os.MkdirAll(filepath.Join(tempDir, path), 0755) + err := os.MkdirAll(filepath.Join(tempDir, path), 0o755) require.NoError(t, err) return nil } @@ -46,7 +46,7 @@ func createFakeRepo(t *testing.T, testdataName string) string { absPath := copyTestdata(t, testdataName) // Add .git directory to make it look like a Git repository. - err := os.Mkdir(filepath.Join(absPath, ".git"), 0755) + err := os.Mkdir(filepath.Join(absPath, ".git"), 0o755) require.NoError(t, err) return absPath } diff --git a/libs/git/worktree_test.go b/libs/git/worktree_test.go index 3d620c483..072a9d348 100644 --- a/libs/git/worktree_test.go +++ b/libs/git/worktree_test.go @@ -53,12 +53,12 @@ func TestWorktreeResolveGitDir(t *testing.T) { writeGitCommonDir(t, dir, "../..") t.Run("relative", func(t *testing.T) { - writeGitDir(t, dir, fmt.Sprintf("gitdir: %s", "../.git/worktrees/my_worktree")) + writeGitDir(t, dir, "gitdir: "+"../.git/worktrees/my_worktree") verifyCorrectDirs(t, dir) }) t.Run("absolute", func(t *testing.T) { - writeGitDir(t, dir, fmt.Sprintf("gitdir: %s", filepath.Join(dir, ".git/worktrees/my_worktree"))) + writeGitDir(t, dir, "gitdir: "+filepath.Join(dir, ".git/worktrees/my_worktree")) verifyCorrectDirs(t, dir) }) @@ -77,7 +77,7 @@ func TestWorktreeResolveGitDir(t *testing.T) { func TestWorktreeResolveCommonDir(t *testing.T) { dir := setupWorktree(t) - writeGitDir(t, dir, fmt.Sprintf("gitdir: %s", "../.git/worktrees/my_worktree")) + writeGitDir(t, dir, "gitdir: "+"../.git/worktrees/my_worktree") t.Run("relative", func(t *testing.T) { writeGitCommonDir(t, dir, "../..") diff --git a/libs/jsonschema/extension.go b/libs/jsonschema/extension.go index 3e32caf1a..9badf86a5 100644 --- a/libs/jsonschema/extension.go +++ b/libs/jsonschema/extension.go @@ -34,4 +34,10 @@ type Extension struct { // Version of the schema. This is used to determine if the schema is // compatible with the current CLI version. Version *int `json:"version,omitempty"` + + // This field is not in JSON schema spec, but it is supported in VSCode and in the Databricks Workspace + // It is used to provide a rich description of the field in the hover tooltip. + // https://code.visualstudio.com/docs/languages/json#_use-rich-formatting-in-hovers + // Also it can be used in documentation generation. 
+ MarkdownDescription string `json:"markdownDescription,omitempty"` } diff --git a/libs/jsonschema/from_type.go b/libs/jsonschema/from_type.go index 18a2b3ba5..6f8f39d96 100644 --- a/libs/jsonschema/from_type.go +++ b/libs/jsonschema/from_type.go @@ -211,7 +211,7 @@ func getStructFields(typ reflect.Type) []reflect.StructField { fields := []reflect.StructField{} bfsQueue := list.New() - for i := 0; i < typ.NumField(); i++ { + for i := range typ.NumField() { bfsQueue.PushBack(typ.Field(i)) } for bfsQueue.Len() > 0 { @@ -233,7 +233,7 @@ func getStructFields(typ reflect.Type) []reflect.StructField { fieldType = fieldType.Elem() } - for i := 0; i < fieldType.NumField(); i++ { + for i := range fieldType.NumField() { bfsQueue.PushBack(fieldType.Field(i)) } } diff --git a/libs/jsonschema/from_type_test.go b/libs/jsonschema/from_type_test.go index 0ddb1011a..cdfdcfd10 100644 --- a/libs/jsonschema/from_type_test.go +++ b/libs/jsonschema/from_type_test.go @@ -403,7 +403,8 @@ func TestFromTypeError(t *testing.T) { // Maps with non-string keys should panic. type mapOfInts map[int]int assert.PanicsWithValue(t, "found map with non-string key: int", func() { - FromType(reflect.TypeOf(mapOfInts{}), nil) + _, err := FromType(reflect.TypeOf(mapOfInts{}), nil) + require.NoError(t, err) }) // Unsupported types should return an error. diff --git a/libs/jsonschema/instance.go b/libs/jsonschema/instance.go index 4440a2fe2..eb36822a0 100644 --- a/libs/jsonschema/instance.go +++ b/libs/jsonschema/instance.go @@ -2,6 +2,7 @@ package jsonschema import ( "encoding/json" + "errors" "fmt" "os" "slices" @@ -149,7 +150,7 @@ func (s *Schema) validateAnyOf(instance map[string]any) error { // According to the JSON schema RFC, anyOf must contain at least one schema. // https://json-schema.org/draft/2020-12/json-schema-core if len(s.AnyOf) == 0 { - return fmt.Errorf("anyOf must contain at least one schema") + return errors.New("anyOf must contain at least one schema") } for _, anyOf := range s.AnyOf { @@ -158,5 +159,5 @@ func (s *Schema) validateAnyOf(instance map[string]any) error { return nil } } - return fmt.Errorf("instance does not match any of the schemas in anyOf") + return errors.New("instance does not match any of the schemas in anyOf") } diff --git a/libs/jsonschema/schema.go b/libs/jsonschema/schema.go index b9c3fb08c..e63dde359 100644 --- a/libs/jsonschema/schema.go +++ b/libs/jsonschema/schema.go @@ -69,6 +69,13 @@ type Schema struct { // Schema that must match any of the schemas in the array AnyOf []Schema `json:"anyOf,omitempty"` + + // Schema that must match one of the schemas in the array + OneOf []Schema `json:"oneOf,omitempty"` + + // Title of the object, rendered as inline documentation in the IDE. + // https://json-schema.org/understanding-json-schema/reference/annotations + Title string `json:"title,omitempty"` } // Default value defined in a JSON Schema, represented as a string. diff --git a/libs/jsonschema/utils.go b/libs/jsonschema/utils.go index ff9b88312..bc9339cae 100644 --- a/libs/jsonschema/utils.go +++ b/libs/jsonschema/utils.go @@ -150,7 +150,7 @@ func (e patternMatchError) Error() string { // If custom user error message is defined, return error with the custom message msg := e.FailureMessage if msg == "" { - msg = fmt.Sprintf("Expected to match regex pattern: %s", e.Pattern) + msg = "Expected to match regex pattern: " + e.Pattern } return fmt.Sprintf("invalid value for %s: %q. 
%s", e.PropertyName, e.PropertyValue, msg) } diff --git a/libs/jsonschema/utils_test.go b/libs/jsonschema/utils_test.go index 89200dae3..954c723d3 100644 --- a/libs/jsonschema/utils_test.go +++ b/libs/jsonschema/utils_test.go @@ -96,7 +96,7 @@ func TestTemplateFromString(t *testing.T) { v, err = fromString("1.1", NumberType) assert.NoError(t, err) // Floating point conversions are not perfect - assert.True(t, (v.(float64)-1.1) < 0.000001) + assert.Less(t, (v.(float64) - 1.1), 0.000001) v, err = fromString("12345", IntegerType) assert.NoError(t, err) @@ -104,7 +104,7 @@ func TestTemplateFromString(t *testing.T) { v, err = fromString("123", NumberType) assert.NoError(t, err) - assert.Equal(t, float64(123), v) + assert.InDelta(t, float64(123), v.(float64), 0.0001) _, err = fromString("qrt", ArrayType) assert.EqualError(t, err, "cannot parse string as object of type array. Value of string: \"qrt\"") diff --git a/libs/jsonschema/validate_type.go b/libs/jsonschema/validate_type.go index 125d6b20b..9f70498ba 100644 --- a/libs/jsonschema/validate_type.go +++ b/libs/jsonschema/validate_type.go @@ -39,9 +39,11 @@ func validateNumber(v any) error { } func validateInteger(v any) error { - if !slices.Contains([]reflect.Kind{reflect.Int, reflect.Int8, reflect.Int16, + if !slices.Contains([]reflect.Kind{ + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, - reflect.Uint32, reflect.Uint64}, + reflect.Uint32, reflect.Uint64, + }, reflect.TypeOf(v).Kind()) { return fmt.Errorf("expected type integer, but value is %#v", v) } diff --git a/libs/locker/locker.go b/libs/locker/locker.go index b0d65c42e..aadc50b58 100644 --- a/libs/locker/locker.go +++ b/libs/locker/locker.go @@ -116,14 +116,14 @@ func (locker *Locker) assertLockHeld(ctx context.Context) error { // idempotent function since overwrite is set to true func (locker *Locker) Write(ctx context.Context, pathToFile string, content []byte) error { if !locker.Active { - return fmt.Errorf("failed to put file. deploy lock not held") + return errors.New("failed to put file. deploy lock not held") } return locker.filer.Write(ctx, pathToFile, bytes.NewReader(content), filer.OverwriteIfExists, filer.CreateParentDirectories) } func (locker *Locker) Read(ctx context.Context, path string) (io.ReadCloser, error) { if !locker.Active { - return nil, fmt.Errorf("failed to get file. deploy lock not held") + return nil, errors.New("failed to get file. deploy lock not held") } return locker.filer.Read(ctx, path) } @@ -140,7 +140,7 @@ func (locker *Locker) Lock(ctx context.Context, isForced bool) error { return err } - var modes = []filer.WriteMode{ + modes := []filer.WriteMode{ // Always create parent directory if it doesn't yet exist. 
filer.CreateParentDirectories, } @@ -173,7 +173,7 @@ func (locker *Locker) Lock(ctx context.Context, isForced bool) error { func (locker *Locker) Unlock(ctx context.Context, opts ...UnlockOption) error { if !locker.Active { - return fmt.Errorf("unlock called when lock is not held") + return errors.New("unlock called when lock is not held") } // if allowLockFileNotExist is set, do not throw an error if the lock file does @@ -196,7 +196,7 @@ func (locker *Locker) Unlock(ctx context.Context, opts ...UnlockOption) error { return nil } -func CreateLocker(user string, targetDir string, w *databricks.WorkspaceClient) (*Locker, error) { +func CreateLocker(user, targetDir string, w *databricks.WorkspaceClient) (*Locker, error) { filer, err := filer.NewWorkspaceFilesClient(w, targetDir) if err != nil { return nil, err diff --git a/libs/log/context.go b/libs/log/context.go index d9e31d116..5e3e8ccb6 100644 --- a/libs/log/context.go +++ b/libs/log/context.go @@ -2,7 +2,6 @@ package log import ( "context" - "log/slog" ) diff --git a/libs/log/logger.go b/libs/log/logger.go index 43a30e92b..c1d307c89 100644 --- a/libs/log/logger.go +++ b/libs/log/logger.go @@ -3,10 +3,9 @@ package log import ( "context" "fmt" + "log/slog" "runtime" "time" - - "log/slog" ) // GetLogger returns either the logger configured on the context, @@ -31,6 +30,51 @@ func log(logger *slog.Logger, ctx context.Context, level slog.Level, msg string) _ = logger.Handler().Handle(ctx, r) } +// Trace logs a string using the context-local or global logger. +func Trace(ctx context.Context, msg string) { + logger := GetLogger(ctx) + if !logger.Enabled(ctx, LevelTrace) { + return + } + log(logger, ctx, LevelTrace, msg) +} + +// Debug logs a string using the context-local or global logger. +func Debug(ctx context.Context, msg string) { + logger := GetLogger(ctx) + if !logger.Enabled(ctx, LevelDebug) { + return + } + log(logger, ctx, LevelDebug, msg) +} + +// Info logs a string using the context-local or global logger. +func Info(ctx context.Context, msg string) { + logger := GetLogger(ctx) + if !logger.Enabled(ctx, LevelInfo) { + return + } + log(logger, ctx, LevelInfo, msg) +} + +// Warn logs a string using the context-local or global logger. +func Warn(ctx context.Context, msg string) { + logger := GetLogger(ctx) + if !logger.Enabled(ctx, LevelWarn) { + return + } + log(logger, ctx, LevelWarn, msg) +} + +// Error logs a string using the context-local or global logger. +func Error(ctx context.Context, msg string) { + logger := GetLogger(ctx) + if !logger.Enabled(ctx, LevelError) { + return + } + log(logger, ctx, LevelError, msg) +} + // Tracef logs a formatted string using the context-local or global logger. func Tracef(ctx context.Context, format string, v ...any) { logger := GetLogger(ctx) diff --git a/libs/log/sdk.go b/libs/log/sdk.go index e1b1ffed4..086f80f50 100644 --- a/libs/log/sdk.go +++ b/libs/log/sdk.go @@ -3,11 +3,10 @@ package log import ( "context" "fmt" + "log/slog" "runtime" "time" - "log/slog" - sdk "github.com/databricks/databricks-sdk-go/logger" ) diff --git a/libs/notebook/detect.go b/libs/notebook/detect.go index cd8680bfa..40c850945 100644 --- a/libs/notebook/detect.go +++ b/libs/notebook/detect.go @@ -46,7 +46,7 @@ func (f file) close() error { func (f file) readHeader() (string, error) { // Scan header line with some padding. 
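The new `Trace`/`Debug`/`Info`/`Warn`/`Error` helpers above all follow the same shape: fetch the context-local logger, return early if the level is disabled, then hand the record to the handler. A simplified version of that level-gated pattern built directly on `log/slog` (not the CLI's `libs/log` package):

```go
package main

import (
	"context"
	"log/slog"
	"os"
)

// debugf logs only when the logger has debug enabled, so callers can add
// cheap calls on hot paths without paying for suppressed output.
func debugf(ctx context.Context, logger *slog.Logger, msg string) {
	if !logger.Enabled(ctx, slog.LevelDebug) {
		return
	}
	logger.DebugContext(ctx, msg)
}

func main() {
	ctx := context.Background()

	infoOnly := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelInfo}))
	verbose := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))

	debugf(ctx, infoOnly, "dropped: debug is below the configured level")
	debugf(ctx, verbose, "emitted: debug is enabled on this logger")
}
```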
- var buf = make([]byte, headerLength) + buf := make([]byte, headerLength) n, err := f.f.Read([]byte(buf)) if err != nil && err != io.EOF { return "", err diff --git a/libs/notebook/detect_jupyter_test.go b/libs/notebook/detect_jupyter_test.go index 4ff2aeff6..af29a2214 100644 --- a/libs/notebook/detect_jupyter_test.go +++ b/libs/notebook/detect_jupyter_test.go @@ -41,7 +41,7 @@ func TestDetectJupyterInvalidJSON(t *testing.T) { dir := t.TempDir() path := filepath.Join(dir, "file.ipynb") buf := make([]byte, 128) - err := os.WriteFile(path, buf, 0644) + err := os.WriteFile(path, buf, 0o644) require.NoError(t, err) // Garbage contents means not a notebook. @@ -55,7 +55,7 @@ func TestDetectJupyterNoCells(t *testing.T) { dir := t.TempDir() path := filepath.Join(dir, "file.ipynb") buf := []byte("{}") - err := os.WriteFile(path, buf, 0644) + err := os.WriteFile(path, buf, 0o644) require.NoError(t, err) // Garbage contents means not a notebook. @@ -69,7 +69,7 @@ func TestDetectJupyterOldVersion(t *testing.T) { dir := t.TempDir() path := filepath.Join(dir, "file.ipynb") buf := []byte(`{ "cells": [], "metadata": {}, "nbformat": 3 }`) - err := os.WriteFile(path, buf, 0644) + err := os.WriteFile(path, buf, 0o644) require.NoError(t, err) // Garbage contents means not a notebook. diff --git a/libs/notebook/detect_test.go b/libs/notebook/detect_test.go index 786c7e394..49a67d2d3 100644 --- a/libs/notebook/detect_test.go +++ b/libs/notebook/detect_test.go @@ -1,7 +1,6 @@ package notebook import ( - "errors" "io/fs" "os" "path/filepath" @@ -53,7 +52,7 @@ func TestDetectCallsDetectJupyter(t *testing.T) { func TestDetectUnknownExtension(t *testing.T) { _, _, err := Detect("./testdata/doesntexist.foobar") - assert.True(t, errors.Is(err, fs.ErrNotExist)) + assert.ErrorIs(t, err, fs.ErrNotExist) nb, _, err := Detect("./testdata/unknown_extension.foobar") require.NoError(t, err) @@ -62,7 +61,7 @@ func TestDetectUnknownExtension(t *testing.T) { func TestDetectNoExtension(t *testing.T) { _, _, err := Detect("./testdata/doesntexist") - assert.True(t, errors.Is(err, fs.ErrNotExist)) + assert.ErrorIs(t, err, fs.ErrNotExist) nb, _, err := Detect("./testdata/no_extension") require.NoError(t, err) @@ -78,7 +77,7 @@ func TestDetectEmptyFile(t *testing.T) { // Create empty file. dir := t.TempDir() path := filepath.Join(dir, "file.py") - err := os.WriteFile(path, nil, 0644) + err := os.WriteFile(path, nil, 0o644) require.NoError(t, err) // No contents means not a notebook. @@ -92,7 +91,7 @@ func TestDetectFileWithLongHeader(t *testing.T) { dir := t.TempDir() path := filepath.Join(dir, "file.py") buf := make([]byte, 128*1024) - err := os.WriteFile(path, buf, 0644) + err := os.WriteFile(path, buf, 0o644) require.NoError(t, err) // Garbage contents means not a notebook. 
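Above, `assert.True(t, errors.Is(err, fs.ErrNotExist))` becomes `assert.ErrorIs(t, err, fs.ErrNotExist)`, which prints both errors on failure instead of a bare boolean. A compact example of the same assertion:

```go
package example

import (
	"io/fs"
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestErrorIsAssertion(t *testing.T) {
	_, err := os.Stat(filepath.Join(t.TempDir(), "does-not-exist"))

	// Equivalent to assert.True(t, errors.Is(err, fs.ErrNotExist)), but the
	// failure message includes the actual error chain.
	assert.ErrorIs(t, err, fs.ErrNotExist)
}
```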
diff --git a/libs/process/background_test.go b/libs/process/background_test.go index 2e47e814b..5cc810f5d 100644 --- a/libs/process/background_test.go +++ b/libs/process/background_test.go @@ -4,7 +4,7 @@ import ( "bufio" "bytes" "context" - "fmt" + "errors" "os/exec" "strings" "testing" @@ -95,13 +95,13 @@ func TestBackgroundNoStdin(t *testing.T) { func TestBackgroundFails(t *testing.T) { ctx := context.Background() _, err := Background(ctx, []string{"ls", "/dev/null/x"}) - assert.NotNil(t, err) + assert.Error(t, err) } func TestBackgroundFailsOnOption(t *testing.T) { ctx := context.Background() _, err := Background(ctx, []string{"ls", "/dev/null/x"}, func(_ context.Context, c *exec.Cmd) error { - return fmt.Errorf("nope") + return errors.New("nope") }) assert.EqualError(t, err, "nope") } diff --git a/libs/process/forwarded_test.go b/libs/process/forwarded_test.go index ddb79818f..71f0a6a63 100644 --- a/libs/process/forwarded_test.go +++ b/libs/process/forwarded_test.go @@ -27,7 +27,7 @@ func TestForwardedFails(t *testing.T) { err := Forwarded(ctx, []string{ "_non_existent_", }, strings.NewReader("abc\n"), &buf, &buf) - assert.NotNil(t, err) + assert.Error(t, err) } func TestForwardedFailsOnStdinPipe(t *testing.T) { @@ -39,5 +39,5 @@ func TestForwardedFailsOnStdinPipe(t *testing.T) { c.Stdin = strings.NewReader("x") return nil }) - assert.NotNil(t, err) + assert.Error(t, err) } diff --git a/libs/process/opts_test.go b/libs/process/opts_test.go index 3a819fbb9..8b5d51928 100644 --- a/libs/process/opts_test.go +++ b/libs/process/opts_test.go @@ -41,7 +41,7 @@ func TestWorksWithLibsEnv(t *testing.T) { vars := cmd.Environ() sort.Strings(vars) - assert.True(t, len(vars) >= 2) + assert.GreaterOrEqual(t, len(vars), 2) assert.Equal(t, "CCC=DDD", vars[0]) assert.Equal(t, "EEE=FFF", vars[1]) } diff --git a/libs/process/stub.go b/libs/process/stub.go index 8472f65d5..528489098 100644 --- a/libs/process/stub.go +++ b/libs/process/stub.go @@ -148,23 +148,34 @@ func (s *processStub) run(cmd *exec.Cmd) error { if !re.MatchString(norm) { continue } + err := resp.err if resp.stdout != "" { - cmd.Stdout.Write([]byte(resp.stdout)) + _, err1 := cmd.Stdout.Write([]byte(resp.stdout)) + if err == nil { + err = err1 + } } if resp.stderr != "" { - cmd.Stderr.Write([]byte(resp.stderr)) + _, err1 := cmd.Stderr.Write([]byte(resp.stderr)) + if err == nil { + err = err1 + } } - return resp.err + return err } if s.callback != nil { return s.callback(cmd) } var zeroStub reponseStub if s.reponseStub == zeroStub { - return fmt.Errorf("no default process stub") + return errors.New("no default process stub") } + err := s.reponseStub.err if s.reponseStub.stdout != "" { - cmd.Stdout.Write([]byte(s.reponseStub.stdout)) + _, err1 := cmd.Stdout.Write([]byte(s.reponseStub.stdout)) + if err == nil { + err = err1 + } } - return s.reponseStub.err + return err } diff --git a/libs/process/stub_test.go b/libs/process/stub_test.go index 65f59f817..158e8b3a6 100644 --- a/libs/process/stub_test.go +++ b/libs/process/stub_test.go @@ -2,7 +2,7 @@ package process_test import ( "context" - "fmt" + "errors" "os/exec" "testing" @@ -32,7 +32,7 @@ func TestStubOutput(t *testing.T) { func TestStubFailure(t *testing.T) { ctx := context.Background() ctx, stub := process.WithStub(ctx) - stub.WithFailure(fmt.Errorf("nope")) + stub.WithFailure(errors.New("nope")) _, err := process.Background(ctx, []string{"/bin/meeecho", "1"}) require.EqualError(t, err, "/bin/meeecho 1: nope") @@ -43,9 +43,15 @@ func TestStubCallback(t *testing.T) { ctx := 
context.Background() ctx, stub := process.WithStub(ctx) stub.WithCallback(func(cmd *exec.Cmd) error { - cmd.Stderr.Write([]byte("something...")) - cmd.Stdout.Write([]byte("else...")) - return fmt.Errorf("yep") + _, err := cmd.Stderr.Write([]byte("something...")) + if err != nil { + return err + } + _, err = cmd.Stdout.Write([]byte("else...")) + if err != nil { + return err + } + return errors.New("yep") }) _, err := process.Background(ctx, []string{"/bin/meeecho", "1"}) @@ -64,7 +70,7 @@ func TestStubResponses(t *testing.T) { stub. WithStdoutFor("qux 1", "first"). WithStdoutFor("qux 2", "second"). - WithFailureFor("qux 3", fmt.Errorf("nope")) + WithFailureFor("qux 3", errors.New("nope")) first, err := process.Background(ctx, []string{"/path/is/irrelevant/qux", "1"}) require.NoError(t, err) diff --git a/libs/python/detect.go b/libs/python/detect.go index 8fcc7cd9c..e86d9d621 100644 --- a/libs/python/detect.go +++ b/libs/python/detect.go @@ -11,6 +11,19 @@ import ( "runtime" ) +// GetExecutable gets appropriate python binary name for the platform +func GetExecutable() string { + // On Windows when virtualenv is created, the /Scripts directory + // contains python.exe but no python3.exe. + // Most installers (e.g. the ones from python.org) only install python.exe and not python3.exe + + if runtime.GOOS == "windows" { + return "python" + } else { + return "python3" + } +} + // DetectExecutable looks up the path to the python3 executable from the PATH // environment variable. // @@ -25,7 +38,9 @@ func DetectExecutable(ctx context.Context) (string, error) { // the parent directory tree. // // See https://github.com/pyenv/pyenv#understanding-python-version-selection - out, err := exec.LookPath("python3") + + out, err := exec.LookPath(GetExecutable()) + // most of the OS'es have python3 in $PATH, but for those which don't, // we perform the latest version lookup if err != nil && !errors.Is(err, exec.ErrNotFound) { @@ -54,7 +69,7 @@ func DetectExecutable(ctx context.Context) (string, error) { func DetectVEnvExecutable(venvPath string) (string, error) { interpreterPath := filepath.Join(venvPath, "bin", "python3") if runtime.GOOS == "windows" { - interpreterPath = filepath.Join(venvPath, "Scripts", "python3.exe") + interpreterPath = filepath.Join(venvPath, "Scripts", "python.exe") } if _, err := os.Stat(interpreterPath); err != nil { diff --git a/libs/python/detect_test.go b/libs/python/detect_test.go index 78c7067f7..0aeedb776 100644 --- a/libs/python/detect_test.go +++ b/libs/python/detect_test.go @@ -14,13 +14,13 @@ func TestDetectVEnvExecutable(t *testing.T) { dir := t.TempDir() interpreterPath := interpreterPath(dir) - err := os.Mkdir(filepath.Dir(interpreterPath), 0755) + err := os.Mkdir(filepath.Dir(interpreterPath), 0o755) require.NoError(t, err) - err = os.WriteFile(interpreterPath, []byte(""), 0755) + err = os.WriteFile(interpreterPath, []byte(""), 0o755) require.NoError(t, err) - err = os.WriteFile(filepath.Join(dir, "pyvenv.cfg"), []byte(""), 0755) + err = os.WriteFile(filepath.Join(dir, "pyvenv.cfg"), []byte(""), 0o755) require.NoError(t, err) executable, err := DetectVEnvExecutable(dir) @@ -39,7 +39,7 @@ func TestDetectVEnvExecutable_badLayout(t *testing.T) { func interpreterPath(venvPath string) string { if runtime.GOOS == "windows" { - return filepath.Join(venvPath, "Scripts", "python3.exe") + return filepath.Join(venvPath, "Scripts", "python.exe") } else { return filepath.Join(venvPath, "bin", "python3") } diff --git a/libs/python/interpreters.go b/libs/python/interpreters.go index 
94f5074de..6071309a8 100644 --- a/libs/python/interpreters.go +++ b/libs/python/interpreters.go @@ -18,8 +18,10 @@ import ( var ErrNoPythonInterpreters = errors.New("no python3 interpreters found") -const officialMswinPython = "(Python Official) https://python.org/downloads/windows" -const microsoftStorePython = "(Microsoft Store) https://apps.microsoft.com/store/search?publisher=Python%20Software%20Foundation" +const ( + officialMswinPython = "(Python Official) https://python.org/downloads/windows" + microsoftStorePython = "(Microsoft Store) https://apps.microsoft.com/store/search?publisher=Python%20Software%20Foundation" +) const worldWriteable = 0o002 diff --git a/libs/python/interpreters_unix_test.go b/libs/python/interpreters_unix_test.go index e2b0a5a1c..57adc9279 100644 --- a/libs/python/interpreters_unix_test.go +++ b/libs/python/interpreters_unix_test.go @@ -18,7 +18,7 @@ func TestAtLeastOnePythonInstalled(t *testing.T) { assert.NoError(t, err) a := all.Latest() t.Logf("latest is: %s", a) - assert.True(t, len(all) > 0) + assert.NotEmpty(t, all) } func TestNoInterpretersFound(t *testing.T) { @@ -34,13 +34,14 @@ func TestFilteringInterpreters(t *testing.T) { rogueBin := filepath.Join(t.TempDir(), "rogue-bin") err := os.Mkdir(rogueBin, 0o777) assert.NoError(t, err) - os.Chmod(rogueBin, 0o777) + err = os.Chmod(rogueBin, 0o777) + assert.NoError(t, err) raw, err := os.ReadFile("testdata/world-writeable/python8.4") assert.NoError(t, err) injectedBinary := filepath.Join(rogueBin, "python8.4") - err = os.WriteFile(injectedBinary, raw, 00777) + err = os.WriteFile(injectedBinary, raw, 0o0777) assert.NoError(t, err) t.Setenv("PATH", "testdata/other-binaries-filtered:"+rogueBin) diff --git a/libs/python/pythontest/pythontest.go b/libs/python/pythontest/pythontest.go new file mode 100644 index 000000000..9a2dec0ee --- /dev/null +++ b/libs/python/pythontest/pythontest.go @@ -0,0 +1,107 @@ +package pythontest + +import ( + "context" + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/databricks/cli/internal/testutil" + "github.com/stretchr/testify/require" +) + +type VenvOpts struct { + // input + PythonVersion string + skipVersionCheck bool + + // input/output + Dir string + Name string + + // output: + // Absolute path to venv + EnvPath string + + // Absolute path to venv/bin or venv/Scripts, depending on OS + BinPath string + + // Absolute path to python binary + PythonExe string +} + +func CreatePythonEnv(opts *VenvOpts) error { + if opts == nil || opts.PythonVersion == "" { + return errors.New("PythonVersion must be provided") + } + if opts.Name == "" { + opts.Name = testutil.RandomName("test-venv-") + } + + cmd := exec.Command("uv", "venv", opts.Name, "--python", opts.PythonVersion, "--seed", "-q") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Dir = opts.Dir + err := cmd.Run() + if err != nil { + return err + } + + opts.EnvPath, err = filepath.Abs(filepath.Join(opts.Dir, opts.Name)) + if err != nil { + return err + } + + _, err = os.Stat(opts.EnvPath) + if err != nil { + return fmt.Errorf("cannot stat EnvPath %s: %s", opts.EnvPath, err) + } + + if runtime.GOOS == "windows" { + // https://github.com/pypa/virtualenv/commit/993ba1316a83b760370f5a3872b3f5ef4dd904c1 + opts.BinPath = filepath.Join(opts.EnvPath, "Scripts") + opts.PythonExe = filepath.Join(opts.BinPath, "python.exe") + } else { + opts.BinPath = filepath.Join(opts.EnvPath, "bin") + opts.PythonExe = filepath.Join(opts.BinPath, "python3") + } + + _, err = 
os.Stat(opts.BinPath) + if err != nil { + return fmt.Errorf("cannot stat BinPath %s: %s", opts.BinPath, err) + } + + _, err = os.Stat(opts.PythonExe) + if err != nil { + return fmt.Errorf("cannot stat PythonExe %s: %s", opts.PythonExe, err) + } + + if !opts.skipVersionCheck { + cmd := exec.Command(opts.PythonExe, "--version") + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("Failed to run %s --version: %s", opts.PythonExe, err) + } + outString := string(out) + expectVersion := "Python " + opts.PythonVersion + if !strings.HasPrefix(outString, expectVersion) { + return fmt.Errorf("Unexpected output from %s --version: %v (expected %v)", opts.PythonExe, outString, expectVersion) + } + } + + return nil +} + +func RequireActivatedPythonEnv(t *testing.T, ctx context.Context, opts *VenvOpts) { + err := CreatePythonEnv(opts) + require.NoError(t, err) + require.DirExists(t, opts.BinPath) + + newPath := fmt.Sprintf("%s%c%s", opts.BinPath, os.PathListSeparator, os.Getenv("PATH")) + t.Setenv("PATH", newPath) +} diff --git a/libs/python/pythontest/pythontest_test.go b/libs/python/pythontest/pythontest_test.go new file mode 100644 index 000000000..3161092d3 --- /dev/null +++ b/libs/python/pythontest/pythontest_test.go @@ -0,0 +1,43 @@ +package pythontest + +import ( + "context" + "os/exec" + "path/filepath" + "testing" + + "github.com/databricks/cli/libs/python" + "github.com/stretchr/testify/require" +) + +func TestVenvSuccess(t *testing.T) { + // Test at least two version to ensure we capture a case where venv version does not match system one + for _, pythonVersion := range []string{"3.11", "3.12"} { + t.Run(pythonVersion, func(t *testing.T) { + ctx := context.Background() + dir := t.TempDir() + opts := VenvOpts{ + PythonVersion: pythonVersion, + Dir: dir, + } + RequireActivatedPythonEnv(t, ctx, &opts) + require.DirExists(t, opts.EnvPath) + require.DirExists(t, opts.BinPath) + require.FileExists(t, opts.PythonExe) + + pythonExe, err := exec.LookPath(python.GetExecutable()) + require.NoError(t, err) + require.Equal(t, filepath.Dir(pythonExe), filepath.Dir(opts.PythonExe)) + require.FileExists(t, pythonExe) + }) + } +} + +func TestWrongVersion(t *testing.T) { + require.Error(t, CreatePythonEnv(&VenvOpts{PythonVersion: "4.0"})) +} + +func TestMissingVersion(t *testing.T) { + require.Error(t, CreatePythonEnv(nil)) + require.Error(t, CreatePythonEnv(&VenvOpts{})) +} diff --git a/libs/sync/diff.go b/libs/sync/diff.go index e91f7277e..d81a3ae65 100644 --- a/libs/sync/diff.go +++ b/libs/sync/diff.go @@ -20,7 +20,7 @@ func (d diff) IsEmpty() bool { // Compute operations required to make files in WSFS reflect current local files. // Takes into account changes since the last sync iteration. -func computeDiff(after *SnapshotState, before *SnapshotState) diff { +func computeDiff(after, before *SnapshotState) diff { d := &diff{ delete: make([]string, 0), rmdir: make([]string, 0), @@ -35,7 +35,7 @@ func computeDiff(after *SnapshotState, before *SnapshotState) diff { } // Add operators for tracked files that no longer exist. -func (d *diff) addRemovedFiles(after *SnapshotState, before *SnapshotState) { +func (d *diff) addRemovedFiles(after, before *SnapshotState) { for localName, remoteName := range before.LocalToRemoteNames { if _, ok := after.LocalToRemoteNames[localName]; !ok { d.delete = append(d.delete, remoteName) @@ -50,7 +50,7 @@ func (d *diff) addRemovedFiles(after *SnapshotState, before *SnapshotState) { // Cleanup previous remote files for files that had their remote targets change. 
For // example this is possible if you convert a normal python script to a notebook. -func (d *diff) addFilesWithRemoteNameChanged(after *SnapshotState, before *SnapshotState) { +func (d *diff) addFilesWithRemoteNameChanged(after, before *SnapshotState) { for localName, beforeRemoteName := range before.LocalToRemoteNames { afterRemoteName, ok := after.LocalToRemoteNames[localName] if ok && afterRemoteName != beforeRemoteName { @@ -60,7 +60,7 @@ func (d *diff) addFilesWithRemoteNameChanged(after *SnapshotState, before *Snaps } // Add operators for files that were not being tracked before. -func (d *diff) addNewFiles(after *SnapshotState, before *SnapshotState) { +func (d *diff) addNewFiles(after, before *SnapshotState) { for localName := range after.LastModifiedTimes { if _, ok := before.LastModifiedTimes[localName]; !ok { d.put = append(d.put, localName) @@ -74,7 +74,7 @@ func (d *diff) addNewFiles(after *SnapshotState, before *SnapshotState) { } // Add operators for files which had their contents updated. -func (d *diff) addUpdatedFiles(after *SnapshotState, before *SnapshotState) { +func (d *diff) addUpdatedFiles(after, before *SnapshotState) { for localName, modTime := range after.LastModifiedTimes { prevModTime, ok := before.LastModifiedTimes[localName] if ok && modTime.After(prevModTime) { diff --git a/libs/sync/event.go b/libs/sync/event.go index 8e5c0efa2..510a01954 100644 --- a/libs/sync/event.go +++ b/libs/sync/event.go @@ -52,10 +52,10 @@ func (e *EventChanges) IsEmpty() bool { func (e *EventChanges) String() string { var changes []string if len(e.Put) > 0 { - changes = append(changes, fmt.Sprintf("PUT: %s", strings.Join(e.Put, ", "))) + changes = append(changes, "PUT: "+strings.Join(e.Put, ", ")) } if len(e.Delete) > 0 { - changes = append(changes, fmt.Sprintf("DELETE: %s", strings.Join(e.Delete, ", "))) + changes = append(changes, "DELETE: "+strings.Join(e.Delete, ", ")) } return strings.Join(changes, ", ") } @@ -70,10 +70,10 @@ func (e *EventStart) String() string { return "" } - return fmt.Sprintf("Action: %s", e.EventChanges.String()) + return "Action: " + e.EventChanges.String() } -func newEventStart(seq int, put []string, delete []string) Event { +func newEventStart(seq int, put, delete []string) Event { return &EventStart{ EventBase: newEventBase(seq, EventTypeStart), EventChanges: &EventChanges{Put: put, Delete: delete}, @@ -98,9 +98,9 @@ func (e *EventSyncProgress) String() string { switch e.Action { case EventActionPut: - return fmt.Sprintf("Uploaded %s", e.Path) + return "Uploaded " + e.Path case EventActionDelete: - return fmt.Sprintf("Deleted %s", e.Path) + return "Deleted " + e.Path default: panic("invalid action") } @@ -133,7 +133,7 @@ func (e *EventSyncComplete) String() string { return "Complete" } -func newEventComplete(seq int, put []string, delete []string) Event { +func newEventComplete(seq int, put, delete []string) Event { return &EventSyncComplete{ EventBase: newEventBase(seq, EventTypeComplete), EventChanges: &EventChanges{Put: put, Delete: delete}, diff --git a/libs/sync/output.go b/libs/sync/output.go index c01b25ef6..e6ac8c56c 100644 --- a/libs/sync/output.go +++ b/libs/sync/output.go @@ -43,9 +43,9 @@ func TextOutput(ctx context.Context, ch <-chan Event, w io.Writer) { // Log only if something actually happened. // Sync events produce an empty string if nothing happened. 
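A recurring change in the event.go hunk above (and in the earlier filer error types) is replacing `fmt.Sprintf("prefix %s", x)` with plain concatenation when only a single string is joined; the output is identical and no format string is parsed. A small comparison, with illustrative values:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	put := []string{"hello.txt", "world.txt"}

	// Before: fmt.Sprintf("PUT: %s", strings.Join(put, ", "))
	// After: plain concatenation does the same job with no format parsing.
	msg := "PUT: " + strings.Join(put, ", ")
	fmt.Println(msg)

	// Printf/Sprintf remain the right choice once non-string verbs or
	// multiple values are involved.
	fmt.Printf("synced %d files\n", len(put))
}
```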
if str := e.String(); str != "" { - bw.WriteString(str) - bw.WriteString("\n") - bw.Flush() + _, _ = bw.WriteString(str) + _, _ = bw.WriteString("\n") + _ = bw.Flush() } } } diff --git a/libs/sync/path.go b/libs/sync/path.go index 97a908965..87397be4b 100644 --- a/libs/sync/path.go +++ b/libs/sync/path.go @@ -14,7 +14,7 @@ import ( ) func repoPathForPath(me *iam.User, remotePath string) string { - base := path.Clean(fmt.Sprintf("/Repos/%s", me.UserName)) + base := path.Clean("/Repos/" + me.UserName) remotePath = path.Clean(remotePath) for strings.HasPrefix(path.Dir(remotePath), base) && path.Dir(remotePath) != base { remotePath = path.Dir(remotePath) diff --git a/libs/sync/snapshot.go b/libs/sync/snapshot.go index f2920d8c2..a596531b9 100644 --- a/libs/sync/snapshot.go +++ b/libs/sync/snapshot.go @@ -2,6 +2,8 @@ package sync import ( "context" + "crypto/md5" + "encoding/hex" "encoding/json" "errors" "fmt" @@ -10,9 +12,6 @@ import ( "path/filepath" "time" - "crypto/md5" - "encoding/hex" - "github.com/databricks/cli/libs/fileset" "github.com/databricks/cli/libs/log" ) @@ -91,7 +90,7 @@ func GetFileName(host, remotePath string) string { func SnapshotPath(opts *SyncOptions) (string, error) { snapshotDir := filepath.Join(opts.SnapshotBasePath, syncSnapshotDirName) if _, err := os.Stat(snapshotDir); errors.Is(err, fs.ErrNotExist) { - err = os.MkdirAll(snapshotDir, 0755) + err = os.MkdirAll(snapshotDir, 0o755) if err != nil { return "", fmt.Errorf("failed to create config directory: %s", err) } @@ -122,7 +121,7 @@ func newSnapshot(ctx context.Context, opts *SyncOptions) (*Snapshot, error) { } func (s *Snapshot) Save(ctx context.Context) error { - f, err := os.OpenFile(s.snapshotPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + f, err := os.OpenFile(s.snapshotPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644) if err != nil { return fmt.Errorf("failed to create/open persisted sync snapshot file: %s", err) } diff --git a/libs/sync/snapshot_state.go b/libs/sync/snapshot_state.go index 09bb5b63e..d8660ee6a 100644 --- a/libs/sync/snapshot_state.go +++ b/libs/sync/snapshot_state.go @@ -51,7 +51,6 @@ func NewSnapshotState(localFiles []fileset.File) (*SnapshotState, error) { // Compute the remote name the file will have in WSFS remoteName := f.Relative isNotebook, err := f.IsNotebook() - if err != nil { // Ignore this file if we're unable to determine the notebook type. // Trying to upload such a file to the workspace would fail anyway. 
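In the `TextOutput` hunk above, the buffered writes gain explicit `_, _ =` and `_ =` discards: progress output is best-effort, and the blank assignments document that intent while keeping errcheck satisfied. A generic sketch of the pattern, with a made-up writer and messages:

```go
package main

import (
	"bufio"
	"fmt"
	"os"
)

// emit writes a best-effort progress line. Write failures here are not
// actionable, so the errors are explicitly discarded with `_ =` rather than
// silently dropped.
func emit(bw *bufio.Writer, msg string) {
	_, _ = bw.WriteString(msg)
	_, _ = bw.WriteString("\n")
	_ = bw.Flush()
}

func main() {
	bw := bufio.NewWriter(os.Stdout)
	emit(bw, "Uploaded hello.txt")
	emit(bw, "Deleted world.txt")
	fmt.Fprintln(os.Stderr, "done")
}
```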
diff --git a/libs/sync/snapshot_test.go b/libs/sync/snapshot_test.go index eef526e58..4ba3874ae 100644 --- a/libs/sync/snapshot_test.go +++ b/libs/sync/snapshot_test.go @@ -51,7 +51,7 @@ func TestDiff(t *testing.T) { assert.NoError(t, err) change, err := state.diff(ctx, files) assert.NoError(t, err) - assert.Len(t, change.delete, 0) + assert.Empty(t, change.delete) assert.Len(t, change.put, 2) assert.Contains(t, change.put, "hello.txt") assert.Contains(t, change.put, "world.txt") @@ -67,7 +67,7 @@ func TestDiff(t *testing.T) { change, err = state.diff(ctx, files) assert.NoError(t, err) - assert.Len(t, change.delete, 0) + assert.Empty(t, change.delete) assert.Len(t, change.put, 1) assert.Contains(t, change.put, "world.txt") assertKeysOfMap(t, state.LastModifiedTimes, []string{"hello.txt", "world.txt"}) @@ -82,7 +82,7 @@ func TestDiff(t *testing.T) { change, err = state.diff(ctx, files) assert.NoError(t, err) assert.Len(t, change.delete, 1) - assert.Len(t, change.put, 0) + assert.Empty(t, change.put) assert.Contains(t, change.delete, "hello.txt") assertKeysOfMap(t, state.LastModifiedTimes, []string{"world.txt"}) assert.Equal(t, map[string]string{"world.txt": "world.txt"}, state.LocalToRemoteNames) @@ -145,8 +145,8 @@ func TestFolderDiff(t *testing.T) { assert.NoError(t, err) change, err := state.diff(ctx, files) assert.NoError(t, err) - assert.Len(t, change.delete, 0) - assert.Len(t, change.rmdir, 0) + assert.Empty(t, change.delete) + assert.Empty(t, change.rmdir) assert.Len(t, change.mkdir, 1) assert.Len(t, change.put, 1) assert.Contains(t, change.mkdir, "foo") @@ -159,8 +159,8 @@ func TestFolderDiff(t *testing.T) { assert.NoError(t, err) assert.Len(t, change.delete, 1) assert.Len(t, change.rmdir, 1) - assert.Len(t, change.mkdir, 0) - assert.Len(t, change.put, 0) + assert.Empty(t, change.mkdir) + assert.Empty(t, change.put) assert.Contains(t, change.delete, "foo/bar") assert.Contains(t, change.rmdir, "foo") } @@ -189,7 +189,7 @@ func TestPythonNotebookDiff(t *testing.T) { foo.Overwrite(t, "# Databricks notebook source\nprint(\"abc\")") change, err := state.diff(ctx, files) assert.NoError(t, err) - assert.Len(t, change.delete, 0) + assert.Empty(t, change.delete) assert.Len(t, change.put, 1) assert.Contains(t, change.put, "foo.py") assertKeysOfMap(t, state.LastModifiedTimes, []string{"foo.py"}) @@ -233,9 +233,9 @@ func TestPythonNotebookDiff(t *testing.T) { change, err = state.diff(ctx, files) assert.NoError(t, err) assert.Len(t, change.delete, 1) - assert.Len(t, change.put, 0) + assert.Empty(t, change.put) assert.Contains(t, change.delete, "foo") - assert.Len(t, state.LastModifiedTimes, 0) + assert.Empty(t, state.LastModifiedTimes) assert.Equal(t, map[string]string{}, state.LocalToRemoteNames) assert.Equal(t, map[string]string{}, state.RemoteToLocalNames) } @@ -264,7 +264,7 @@ func TestErrorWhenIdenticalRemoteName(t *testing.T) { assert.NoError(t, err) change, err := state.diff(ctx, files) assert.NoError(t, err) - assert.Len(t, change.delete, 0) + assert.Empty(t, change.delete) assert.Len(t, change.put, 2) assert.Contains(t, change.put, "foo.py") assert.Contains(t, change.put, "foo") @@ -300,7 +300,7 @@ func TestNoErrorRenameWithIdenticalRemoteName(t *testing.T) { assert.NoError(t, err) change, err := state.diff(ctx, files) assert.NoError(t, err) - assert.Len(t, change.delete, 0) + assert.Empty(t, change.delete) assert.Len(t, change.put, 1) assert.Contains(t, change.put, "foo.py") diff --git a/libs/sync/sync.go b/libs/sync/sync.go index 6bd26f224..f13fa934a 100644 --- a/libs/sync/sync.go +++ 
b/libs/sync/sync.go @@ -2,6 +2,7 @@ package sync import ( "context" + "errors" "fmt" stdsync "sync" "time" @@ -93,7 +94,7 @@ func New(ctx context.Context, opts SyncOptions) (*Sync, error) { // specify the workspace by its resource ID. tracked in: https://databricks.atlassian.net/browse/DECO-194 opts.Host = opts.WorkspaceClient.Config.Host if opts.Host == "" { - return nil, fmt.Errorf("failed to resolve host for snapshot") + return nil, errors.New("failed to resolve host for snapshot") } // For full sync, we start with an empty snapshot. @@ -117,7 +118,7 @@ func New(ctx context.Context, opts SyncOptions) (*Sync, error) { } var notifier EventNotifier - var outputWaitGroup = &stdsync.WaitGroup{} + outputWaitGroup := &stdsync.WaitGroup{} if opts.OutputHandler != nil { ch := make(chan Event, MaxRequestsInFlight) notifier = &ChannelNotifier{ch} diff --git a/libs/sync/sync_test.go b/libs/sync/sync_test.go index 6168dc217..f30431770 100644 --- a/libs/sync/sync_test.go +++ b/libs/sync/sync_test.go @@ -59,7 +59,7 @@ func TestGetFileSet(t *testing.T) { fileList, err := s.GetFileList(ctx) require.NoError(t, err) - require.Equal(t, len(fileList), 10) + require.Len(t, fileList, 10) inc, err = fileset.NewGlobSet(root, []string{}) require.NoError(t, err) @@ -77,7 +77,7 @@ func TestGetFileSet(t *testing.T) { fileList, err = s.GetFileList(ctx) require.NoError(t, err) - require.Equal(t, len(fileList), 2) + require.Len(t, fileList, 2) inc, err = fileset.NewGlobSet(root, []string{"./.databricks/*.go"}) require.NoError(t, err) @@ -95,7 +95,7 @@ func TestGetFileSet(t *testing.T) { fileList, err = s.GetFileList(ctx) require.NoError(t, err) - require.Equal(t, len(fileList), 11) + require.Len(t, fileList, 11) } func TestRecursiveExclude(t *testing.T) { @@ -125,7 +125,7 @@ func TestRecursiveExclude(t *testing.T) { fileList, err := s.GetFileList(ctx) require.NoError(t, err) - require.Equal(t, len(fileList), 7) + require.Len(t, fileList, 7) } func TestNegateExclude(t *testing.T) { @@ -155,6 +155,6 @@ func TestNegateExclude(t *testing.T) { fileList, err := s.GetFileList(ctx) require.NoError(t, err) - require.Equal(t, len(fileList), 1) - require.Equal(t, fileList[0].Relative, "test/sub1/sub2/h.txt") + require.Len(t, fileList, 1) + require.Equal(t, "test/sub1/sub2/h.txt", fileList[0].Relative) } diff --git a/libs/tags/gcp_test.go b/libs/tags/gcp_test.go index 89f4fd8e6..7c960acbb 100644 --- a/libs/tags/gcp_test.go +++ b/libs/tags/gcp_test.go @@ -38,7 +38,6 @@ func TestGcpNormalizeKey(t *testing.T) { assert.Equal(t, "test", gcpTag.NormalizeKey("test")) assert.Equal(t, "cafe", gcpTag.NormalizeKey("café 🍎?")) assert.Equal(t, "cafe_foo", gcpTag.NormalizeKey("__café_foo__")) - } func TestGcpNormalizeValue(t *testing.T) { diff --git a/libs/tags/tag.go b/libs/tags/tag.go index 4e9b329ca..64eab947e 100644 --- a/libs/tags/tag.go +++ b/libs/tags/tag.go @@ -1,6 +1,7 @@ package tags import ( + "errors" "fmt" "regexp" "strings" @@ -21,13 +22,13 @@ type tag struct { func (t *tag) ValidateKey(s string) error { if len(s) == 0 { - return fmt.Errorf("key must not be empty") + return errors.New("key must not be empty") } if len(s) > t.keyLength { return fmt.Errorf("key length %d exceeds maximum of %d", len(s), t.keyLength) } if strings.ContainsFunc(s, func(r rune) bool { return !unicode.Is(latin1, r) }) { - return fmt.Errorf("key contains non-latin1 characters") + return errors.New("key contains non-latin1 characters") } if !t.keyPattern.MatchString(s) { return fmt.Errorf("key %q does not match pattern %q", s, t.keyPattern) @@ -40,7 +41,7 
@@ func (t *tag) ValidateValue(s string) error { return fmt.Errorf("value length %d exceeds maximum of %d", len(s), t.valueLength) } if strings.ContainsFunc(s, func(r rune) bool { return !unicode.Is(latin1, r) }) { - return fmt.Errorf("value contains non-latin1 characters") + return errors.New("value contains non-latin1 characters") } if !t.valuePattern.MatchString(s) { return fmt.Errorf("value %q does not match pattern %q", s, t.valuePattern) diff --git a/libs/template/builtin_test.go b/libs/template/builtin_test.go index 504e0acca..79e04cb84 100644 --- a/libs/template/builtin_test.go +++ b/libs/template/builtin_test.go @@ -11,18 +11,24 @@ import ( func TestBuiltin(t *testing.T) { out, err := Builtin() require.NoError(t, err) - assert.Len(t, out, 3) + assert.GreaterOrEqual(t, len(out), 3) - // Confirm names. - assert.Equal(t, "dbt-sql", out[0].Name) - assert.Equal(t, "default-python", out[1].Name) - assert.Equal(t, "default-sql", out[2].Name) + // Create a map of templates by name for easier lookup + templates := make(map[string]*BuiltinTemplate) + for _, tmpl := range out { + templates[tmpl.Name] = &tmpl + } - // Confirm that the filesystems work. - _, err = fs.Stat(out[0].FS, `template/{{.project_name}}/dbt_project.yml.tmpl`) + // Verify all expected templates exist + assert.Contains(t, templates, "dbt-sql") + assert.Contains(t, templates, "default-python") + assert.Contains(t, templates, "default-sql") + + // Verify the filesystems work for each template + _, err = fs.Stat(templates["dbt-sql"].FS, `template/{{.project_name}}/dbt_project.yml.tmpl`) assert.NoError(t, err) - _, err = fs.Stat(out[1].FS, `template/{{.project_name}}/tests/main_test.py.tmpl`) + _, err = fs.Stat(templates["default-python"].FS, `template/{{.project_name}}/tests/main_test.py.tmpl`) assert.NoError(t, err) - _, err = fs.Stat(out[2].FS, `template/{{.project_name}}/src/orders_daily.sql.tmpl`) + _, err = fs.Stat(templates["default-sql"].FS, `template/{{.project_name}}/src/orders_daily.sql.tmpl`) assert.NoError(t, err) } diff --git a/libs/template/config.go b/libs/template/config.go index 8e7695b91..919ba2250 100644 --- a/libs/template/config.go +++ b/libs/template/config.go @@ -189,7 +189,7 @@ func (c *config) promptOnce(property *jsonschema.Schema, name, defaultVal, descr c.values[name], err = property.ParseString(userInput) if err != nil { // Show error and retry if validation fails - cmdio.LogString(c.ctx, fmt.Sprintf("Validation failed: %s", err.Error())) + cmdio.LogString(c.ctx, "Validation failed: "+err.Error()) return retriableError{err: err} } @@ -197,7 +197,7 @@ func (c *config) promptOnce(property *jsonschema.Schema, name, defaultVal, descr err = c.schema.ValidateInstance(c.values) if err != nil { // Show error and retry if validation fails - cmdio.LogString(c.ctx, fmt.Sprintf("Validation failed: %s", err.Error())) + cmdio.LogString(c.ctx, "Validation failed: "+err.Error()) return retriableError{err: err} } return nil diff --git a/libs/template/config_test.go b/libs/template/config_test.go index a855019b6..515a0b9f5 100644 --- a/libs/template/config_test.go +++ b/libs/template/config_test.go @@ -24,7 +24,7 @@ func TestTemplateConfigAssignValuesFromFile(t *testing.T) { err = c.assignValuesFromFile(filepath.Join(testDir, "config.json")) if assert.NoError(t, err) { assert.Equal(t, int64(1), c.values["int_val"]) - assert.Equal(t, float64(2), c.values["float_val"]) + assert.InDelta(t, float64(2), c.values["float_val"].(float64), 0.0001) assert.Equal(t, true, c.values["bool_val"]) assert.Equal(t, "hello", 
c.values["string_val"]) } @@ -44,7 +44,7 @@ func TestTemplateConfigAssignValuesFromFileDoesNotOverwriteExistingConfigs(t *te err = c.assignValuesFromFile(filepath.Join(testDir, "config.json")) if assert.NoError(t, err) { assert.Equal(t, int64(1), c.values["int_val"]) - assert.Equal(t, float64(2), c.values["float_val"]) + assert.InDelta(t, float64(2), c.values["float_val"].(float64), 0.0001) assert.Equal(t, true, c.values["bool_val"]) assert.Equal(t, "this-is-not-overwritten", c.values["string_val"]) } @@ -89,7 +89,7 @@ func TestTemplateConfigAssignValuesFromDefaultValues(t *testing.T) { err = c.assignDefaultValues(r) if assert.NoError(t, err) { assert.Equal(t, int64(123), c.values["int_val"]) - assert.Equal(t, float64(123), c.values["float_val"]) + assert.InDelta(t, float64(123), c.values["float_val"].(float64), 0.0001) assert.Equal(t, true, c.values["bool_val"]) assert.Equal(t, "hello", c.values["string_val"]) } @@ -110,7 +110,7 @@ func TestTemplateConfigAssignValuesFromTemplatedDefaultValues(t *testing.T) { err = c.assignDefaultValues(r) if assert.NoError(t, err) { assert.Equal(t, int64(123), c.values["int_val"]) - assert.Equal(t, float64(123), c.values["float_val"]) + assert.InDelta(t, float64(123), c.values["float_val"].(float64), 0.0001) assert.Equal(t, true, c.values["bool_val"]) assert.Equal(t, "world", c.values["string_val"]) } diff --git a/libs/template/file_test.go b/libs/template/file_test.go index bd5f6d632..f4bf5652c 100644 --- a/libs/template/file_test.go +++ b/libs/template/file_test.go @@ -8,6 +8,7 @@ import ( "runtime" "testing" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -27,8 +28,8 @@ func testInMemoryFile(t *testing.T, ctx context.Context, perm fs.FileMode) { err = f.Write(ctx, out) assert.NoError(t, err) - assertFileContent(t, filepath.Join(tmpDir, "a/b/c"), "123") - assertFilePermissions(t, filepath.Join(tmpDir, "a/b/c"), perm) + testutil.AssertFileContents(t, filepath.Join(tmpDir, "a/b/c"), "123") + testutil.AssertFilePermissions(t, filepath.Join(tmpDir, "a/b/c"), perm) } func testCopyFile(t *testing.T, ctx context.Context, perm fs.FileMode) { @@ -48,8 +49,8 @@ func testCopyFile(t *testing.T, ctx context.Context, perm fs.FileMode) { err = f.Write(ctx, out) assert.NoError(t, err) - assertFileContent(t, filepath.Join(tmpDir, "a/b/c"), "qwerty") - assertFilePermissions(t, filepath.Join(tmpDir, "a/b/c"), perm) + testutil.AssertFileContents(t, filepath.Join(tmpDir, "source"), "qwerty") + testutil.AssertFilePermissions(t, filepath.Join(tmpDir, "source"), perm) } func TestTemplateInMemoryFilePersistToDisk(t *testing.T) { @@ -57,7 +58,7 @@ func TestTemplateInMemoryFilePersistToDisk(t *testing.T) { t.SkipNow() } ctx := context.Background() - testInMemoryFile(t, ctx, 0755) + testInMemoryFile(t, ctx, 0o755) } func TestTemplateInMemoryFilePersistToDiskForWindows(t *testing.T) { @@ -67,7 +68,7 @@ func TestTemplateInMemoryFilePersistToDiskForWindows(t *testing.T) { // we have separate tests for windows because of differences in valid // fs.FileMode values we can use for different operating systems. 
ctx := context.Background() - testInMemoryFile(t, ctx, 0666) + testInMemoryFile(t, ctx, 0o666) } func TestTemplateCopyFilePersistToDisk(t *testing.T) { @@ -75,7 +76,7 @@ func TestTemplateCopyFilePersistToDisk(t *testing.T) { t.SkipNow() } ctx := context.Background() - testCopyFile(t, ctx, 0644) + testCopyFile(t, ctx, 0o644) } func TestTemplateCopyFilePersistToDiskForWindows(t *testing.T) { @@ -85,5 +86,5 @@ func TestTemplateCopyFilePersistToDiskForWindows(t *testing.T) { // we have separate tests for windows because of differences in valid // fs.FileMode values we can use for different operating systems. ctx := context.Background() - testCopyFile(t, ctx, 0666) + testCopyFile(t, ctx, 0o666) } diff --git a/libs/template/helpers.go b/libs/template/helpers.go index 7f7acbd24..4550e5fa2 100644 --- a/libs/template/helpers.go +++ b/libs/template/helpers.go @@ -31,9 +31,11 @@ type pair struct { v any } -var cachedUser *iam.User -var cachedIsServicePrincipal *bool -var cachedCatalog *string +var ( + cachedUser *iam.User + cachedIsServicePrincipal *bool + cachedCatalog *string +) // UUID that is stable for the duration of the template execution. This can be used // to populate the `bundle.uuid` field in databricks.yml by template authors. diff --git a/libs/template/helpers_test.go b/libs/template/helpers_test.go index 6c476c658..f8bc1f3da 100644 --- a/libs/template/helpers_test.go +++ b/libs/template/helpers_test.go @@ -86,7 +86,7 @@ func TestTemplateRandIntFunction(t *testing.T) { assert.Len(t, r.files, 1) randInt, err := strconv.Atoi(strings.TrimSpace(string(r.files[0].(*inMemoryFile).content))) assert.Less(t, randInt, 10) - assert.Empty(t, err) + assert.NoError(t, err) } func TestTemplateUuidFunction(t *testing.T) { @@ -158,12 +158,11 @@ func TestWorkspaceHost(t *testing.T) { assert.Len(t, r.files, 1) assert.Contains(t, string(r.files[0].(*inMemoryFile).content), "https://myhost.com") assert.Contains(t, string(r.files[0].(*inMemoryFile).content), "i3.xlarge") - } func TestWorkspaceHostNotConfigured(t *testing.T) { ctx := context.Background() - cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "template") + cmd := cmdio.NewIO(ctx, flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "template") ctx = cmdio.InContext(ctx, cmd) w := &databricks.WorkspaceClient{ @@ -178,5 +177,4 @@ func TestWorkspaceHostNotConfigured(t *testing.T) { err = r.walk() require.ErrorContains(t, err, "cannot determine target workspace") - } diff --git a/libs/template/materialize.go b/libs/template/materialize.go index ee30444a5..86a6a8c37 100644 --- a/libs/template/materialize.go +++ b/libs/template/materialize.go @@ -10,9 +10,11 @@ import ( "github.com/databricks/cli/libs/filer" ) -const libraryDirName = "library" -const templateDirName = "template" -const schemaFileName = "databricks_template_schema.json" +const ( + libraryDirName = "library" + templateDirName = "template" + schemaFileName = "databricks_template_schema.json" +) // This function materializes the input templates as a project, using user defined // configurations. diff --git a/libs/template/materialize_test.go b/libs/template/materialize_test.go index f7cd916e3..c9331b43f 100644 --- a/libs/template/materialize_test.go +++ b/libs/template/materialize_test.go @@ -2,7 +2,6 @@ package template import ( "context" - "fmt" "os" "testing" @@ -20,5 +19,5 @@ func TestMaterializeForNonTemplateDirectory(t *testing.T) { // Try to materialize a non-template directory. 
err = Materialize(ctx, "", os.DirFS(tmpDir), nil) - assert.EqualError(t, err, fmt.Sprintf("not a bundle template: expected to find a template schema file at %s", schemaFileName)) + assert.EqualError(t, err, "not a bundle template: expected to find a template schema file at "+schemaFileName) } diff --git a/libs/template/renderer.go b/libs/template/renderer.go index 0f30a67d0..679b7d8b7 100644 --- a/libs/template/renderer.go +++ b/libs/template/renderer.go @@ -150,6 +150,10 @@ func (r *renderer) computeFile(relPathTemplate string) (file, error) { } perm := info.Mode().Perm() + // Always include the write bit for the owner of the file. + // It does not make sense to have a file that is not writable by the owner. + perm |= 0o200 + // Execute relative path template to get destination path for the file relPath, err := r.executeTemplate(relPathTemplate) if err != nil { @@ -310,7 +314,7 @@ func (r *renderer) persistToDisk(ctx context.Context, out filer.Filer) error { if err == nil { return fmt.Errorf("failed to initialize template, one or more files already exist: %s", path) } - if err != nil && !errors.Is(err, fs.ErrNotExist) { + if !errors.Is(err, fs.ErrNotExist) { return fmt.Errorf("error while verifying file %s does not already exist: %w", path, err) } } diff --git a/libs/template/renderer_test.go b/libs/template/renderer_test.go index a4b9166da..b2ec388bd 100644 --- a/libs/template/renderer_test.go +++ b/libs/template/renderer_test.go @@ -2,7 +2,6 @@ package template import ( "context" - "fmt" "io/fs" "os" "path" @@ -17,6 +16,7 @@ import ( "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/dbr" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/tags" @@ -27,20 +27,23 @@ import ( "github.com/stretchr/testify/require" ) -func assertFileContent(t *testing.T, path string, content string) { - b, err := os.ReadFile(path) - require.NoError(t, err) - assert.Equal(t, content, string(b)) +var ( + defaultFilePermissions fs.FileMode + defaultDirPermissions fs.FileMode +) + +func init() { + if runtime.GOOS == "windows" { + defaultFilePermissions = fs.FileMode(0o666) + defaultDirPermissions = fs.FileMode(0o777) + } else { + defaultFilePermissions = fs.FileMode(0o644) + defaultDirPermissions = fs.FileMode(0o755) + } } -func assertFilePermissions(t *testing.T, path string, perm fs.FileMode) { - info, err := os.Stat(path) - require.NoError(t, err) - assert.Equal(t, perm, info.Mode().Perm()) -} - -func assertBuiltinTemplateValid(t *testing.T, template string, settings map[string]any, target string, isServicePrincipal bool, build bool, tempDir string) { - ctx := context.Background() +func assertBuiltinTemplateValid(t *testing.T, template string, settings map[string]any, target string, isServicePrincipal, build bool, tempDir string) { + ctx := dbr.MockRuntime(context.Background(), false) templateFS, err := fs.Sub(builtinTemplates, path.Join("templates", template)) require.NoError(t, err) @@ -69,6 +72,10 @@ func assertBuiltinTemplateValid(t *testing.T, template string, settings map[stri err = renderer.persistToDisk(ctx, out) require.NoError(t, err) + // Verify permissions on file and directory + testutil.AssertFilePermissions(t, filepath.Join(tempDir, "my_project/README.md"), defaultFilePermissions) + testutil.AssertDirPermissions(t, filepath.Join(tempDir, "my_project/resources"), defaultDirPermissions) + b, err := bundle.Load(ctx, 
filepath.Join(tempDir, "my_project")) require.NoError(t, err) diags := bundle.Apply(ctx, b, phases.LoadNamedTarget(target)) @@ -200,8 +207,7 @@ func TestRendererWithAssociatedTemplateInLibrary(t *testing.T) { } func TestRendererExecuteTemplate(t *testing.T) { - templateText := - `"{{.count}} items are made of {{.Material}}". + templateText := `"{{.count}} items are made of {{.Material}}". {{if eq .Animal "sheep" }} Sheep wool is the best! {{else}} @@ -256,7 +262,6 @@ func TestRendererExecuteTemplateWithUnknownProperty(t *testing.T) { } func TestRendererIsSkipped(t *testing.T) { - skipPatterns := []string{"a*", "*yz", "def", "a/b/*"} // skipped paths @@ -319,22 +324,22 @@ func TestRendererPersistToDisk(t *testing.T) { skipPatterns: []string{"a/b/c", "mn*"}, files: []file{ &inMemoryFile{ - perm: 0444, + perm: 0o444, relPath: "a/b/c", content: nil, }, &inMemoryFile{ - perm: 0444, + perm: 0o444, relPath: "mno", content: nil, }, &inMemoryFile{ - perm: 0444, + perm: 0o444, relPath: "a/b/d", content: []byte("123"), }, &inMemoryFile{ - perm: 0444, + perm: 0o444, relPath: "mmnn", content: []byte("456"), }, @@ -349,10 +354,10 @@ func TestRendererPersistToDisk(t *testing.T) { assert.NoFileExists(t, filepath.Join(tmpDir, "a", "b", "c")) assert.NoFileExists(t, filepath.Join(tmpDir, "mno")) - assertFileContent(t, filepath.Join(tmpDir, "a", "b", "d"), "123") - assertFilePermissions(t, filepath.Join(tmpDir, "a", "b", "d"), 0444) - assertFileContent(t, filepath.Join(tmpDir, "mmnn"), "456") - assertFilePermissions(t, filepath.Join(tmpDir, "mmnn"), 0444) + testutil.AssertFileContents(t, filepath.Join(tmpDir, "a/b/d"), "123") + testutil.AssertFilePermissions(t, filepath.Join(tmpDir, "a/b/d"), fs.FileMode(0o444)) + testutil.AssertFileContents(t, filepath.Join(tmpDir, "mmnn"), "456") + testutil.AssertFilePermissions(t, filepath.Join(tmpDir, "mmnn"), fs.FileMode(0o444)) } func TestRendererWalk(t *testing.T) { @@ -436,7 +441,7 @@ func TestRendererSkipAllFilesInCurrentDirectory(t *testing.T) { entries, err := os.ReadDir(tmpDir) require.NoError(t, err) // Assert none of the files are persisted to disk, because of {{skip "*"}} - assert.Len(t, entries, 0) + assert.Empty(t, entries) } func TestRendererSkipPatternsAreRelativeToFileDirectory(t *testing.T) { @@ -520,8 +525,8 @@ func TestRendererReadsPermissionsBits(t *testing.T) { } assert.Len(t, r.files, 2) - assert.Equal(t, getPermissions(r, "script.sh"), fs.FileMode(0755)) - assert.Equal(t, getPermissions(r, "not-a-script"), fs.FileMode(0644)) + assert.Equal(t, getPermissions(r, "script.sh"), fs.FileMode(0o755)) + assert.Equal(t, getPermissions(r, "not-a-script"), fs.FileMode(0o644)) } func TestRendererErrorOnConflictingFile(t *testing.T) { @@ -537,7 +542,7 @@ func TestRendererErrorOnConflictingFile(t *testing.T) { skipPatterns: []string{}, files: []file{ &inMemoryFile{ - perm: 0444, + perm: 0o444, relPath: "a", content: []byte("123"), }, @@ -546,7 +551,7 @@ func TestRendererErrorOnConflictingFile(t *testing.T) { out, err := filer.NewLocalClient(tmpDir) require.NoError(t, err) err = r.persistToDisk(ctx, out) - assert.EqualError(t, err, fmt.Sprintf("failed to initialize template, one or more files already exist: %s", "a")) + assert.EqualError(t, err, "failed to initialize template, one or more files already exist: "+"a") } func TestRendererNoErrorOnConflictingFileIfSkipped(t *testing.T) { @@ -563,7 +568,7 @@ func TestRendererNoErrorOnConflictingFileIfSkipped(t *testing.T) { skipPatterns: []string{"a"}, files: []file{ &inMemoryFile{ - perm: 0444, + perm: 0o444, relPath: 
"a", content: []byte("123"), }, @@ -590,8 +595,8 @@ func TestRendererNonTemplatesAreCreatedAsCopyFiles(t *testing.T) { assert.NoError(t, err) assert.Len(t, r.files, 1) - assert.Equal(t, r.files[0].(*copyFile).srcPath, "not-a-template") - assert.Equal(t, r.files[0].RelPath(), "not-a-template") + assert.Equal(t, "not-a-template", r.files[0].(*copyFile).srcPath) + assert.Equal(t, "not-a-template", r.files[0].RelPath()) } func TestRendererFileTreeRendering(t *testing.T) { @@ -611,7 +616,7 @@ func TestRendererFileTreeRendering(t *testing.T) { // Assert in memory representation is created. assert.Len(t, r.files, 1) - assert.Equal(t, r.files[0].RelPath(), "my_directory/my_file") + assert.Equal(t, "my_directory/my_file", r.files[0].RelPath()) out, err := filer.NewLocalClient(tmpDir) require.NoError(t, err) @@ -619,8 +624,8 @@ func TestRendererFileTreeRendering(t *testing.T) { require.NoError(t, err) // Assert files and directories are correctly materialized. - assert.DirExists(t, filepath.Join(tmpDir, "my_directory")) - assert.FileExists(t, filepath.Join(tmpDir, "my_directory", "my_file")) + testutil.AssertDirPermissions(t, filepath.Join(tmpDir, "my_directory"), defaultDirPermissions) + testutil.AssertFilePermissions(t, filepath.Join(tmpDir, "my_directory", "my_file"), defaultFilePermissions) } func TestRendererSubTemplateInPath(t *testing.T) { diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/settings.json.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/settings.json.tmpl index 562ba136f..3eca01226 100644 --- a/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/settings.json.tmpl +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/settings.json.tmpl @@ -1,6 +1,5 @@ { "python.analysis.stubPath": ".vscode", - "databricks.python.envFile": "${workspaceFolder}/.env", "jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])", "jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------", "python.testing.pytestArgs": [ diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/databricks.yml.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/databricks.yml.tmpl index 5594749a9..ba336f6a1 100644 --- a/libs/template/templates/dbt-sql/template/{{.project_name}}/databricks.yml.tmpl +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/databricks.yml.tmpl @@ -3,6 +3,7 @@ # See https://docs.databricks.com/dev-tools/bundles/index.html for documentation. 
bundle: name: {{.project_name}} + uuid: {{bundle_uuid}} include: - resources/*.yml diff --git a/libs/template/templates/default-python/template/{{.project_name}}/.vscode/settings.json b/libs/template/templates/default-python/template/{{.project_name}}/.vscode/settings.json index f19498daa..8ee87c30d 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/.vscode/settings.json +++ b/libs/template/templates/default-python/template/{{.project_name}}/.vscode/settings.json @@ -1,6 +1,5 @@ { "python.analysis.stubPath": ".vscode", - "databricks.python.envFile": "${workspaceFolder}/.env", "jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])", "jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------", "python.testing.pytestArgs": [ diff --git a/libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl index c42b822a8..4d052e38e 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl @@ -2,6 +2,7 @@ # See https://docs.databricks.com/dev-tools/bundles/index.html for documentation. bundle: name: {{.project_name}} + uuid: {{bundle_uuid}} include: - resources/*.yml diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/settings.json.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/settings.json.tmpl index c63af24b4..03a365f9d 100644 --- a/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/settings.json.tmpl +++ b/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/settings.json.tmpl @@ -1,6 +1,5 @@ { "python.analysis.stubPath": ".vscode", - "databricks.python.envFile": "${workspaceFolder}/.env", "jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])", "jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------", "python.testing.pytestArgs": [ diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/databricks.yml.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/databricks.yml.tmpl index 51d03e99a..84e07df17 100644 --- a/libs/template/templates/default-sql/template/{{.project_name}}/databricks.yml.tmpl +++ b/libs/template/templates/default-sql/template/{{.project_name}}/databricks.yml.tmpl @@ -2,6 +2,7 @@ # See https://docs.databricks.com/dev-tools/bundles/index.html for documentation. 
bundle: name: {{.project_name}} + uuid: {{bundle_uuid}} include: - resources/*.yml diff --git a/libs/testdiff/golden.go b/libs/testdiff/golden.go new file mode 100644 index 000000000..02213c88a --- /dev/null +++ b/libs/testdiff/golden.go @@ -0,0 +1,224 @@ +package testdiff + +import ( + "context" + "flag" + "fmt" + "os" + "regexp" + "slices" + "strings" + "testing" + + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/iamutil" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/stretchr/testify/assert" +) + +var OverwriteMode = false + +func init() { + flag.BoolVar(&OverwriteMode, "update", false, "Overwrite golden files") +} + +func ReadFile(t testutil.TestingT, ctx context.Context, filename string) string { + t.Helper() + data, err := os.ReadFile(filename) + if os.IsNotExist(err) { + return "" + } + assert.NoError(t, err, "Failed to read %s", filename) + // On CI, on Windows \n in the file somehow end up as \r\n + return NormalizeNewlines(string(data)) +} + +func WriteFile(t testutil.TestingT, filename, data string) { + t.Helper() + t.Logf("Overwriting %s", filename) + err := os.WriteFile(filename, []byte(data), 0o644) + assert.NoError(t, err, "Failed to write %s", filename) +} + +func AssertOutput(t testutil.TestingT, ctx context.Context, out, outTitle, expectedPath string) { + t.Helper() + expected := ReadFile(t, ctx, expectedPath) + + out = ReplaceOutput(t, ctx, out) + + if out != expected { + AssertEqualTexts(t, expectedPath, outTitle, expected, out) + + if OverwriteMode { + WriteFile(t, expectedPath, out) + } + } +} + +func AssertOutputJQ(t testutil.TestingT, ctx context.Context, out, outTitle, expectedPath string, ignorePaths []string) { + t.Helper() + expected := ReadFile(t, ctx, expectedPath) + + out = ReplaceOutput(t, ctx, out) + + if out != expected { + AssertEqualJQ(t.(*testing.T), expectedPath, outTitle, expected, out, ignorePaths) + + if OverwriteMode { + WriteFile(t, expectedPath, out) + } + } +} + +var ( + uuidRegex = regexp.MustCompile(`[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}`) + numIdRegex = regexp.MustCompile(`[0-9]{3,}`) + privatePathRegex = regexp.MustCompile(`(/tmp|/private)(/.*)/([a-zA-Z0-9]+)`) +) + +func ReplaceOutput(t testutil.TestingT, ctx context.Context, out string) string { + t.Helper() + out = NormalizeNewlines(out) + replacements := GetReplacementsMap(ctx) + if replacements == nil { + t.Fatal("WithReplacementsMap was not called") + } + out = replacements.Replace(out) + out = uuidRegex.ReplaceAllString(out, "") + out = numIdRegex.ReplaceAllString(out, "") + out = privatePathRegex.ReplaceAllString(out, "/tmp/.../$3") + + return out +} + +type key int + +const ( + replacementsMapKey = key(1) +) + +type Replacement struct { + Old string + New string +} + +type ReplacementsContext struct { + Repls []Replacement +} + +func (r *ReplacementsContext) Replace(s string) string { + // QQQ Should probably only replace whole words + for _, repl := range r.Repls { + s = strings.ReplaceAll(s, repl.Old, repl.New) + } + return s +} + +func (r *ReplacementsContext) Set(old, new string) { + if old == "" || new == "" { + return + } + r.Repls = append(r.Repls, Replacement{Old: old, New: new}) +} + +func WithReplacementsMap(ctx context.Context) (context.Context, *ReplacementsContext) { + value := ctx.Value(replacementsMapKey) + if value != nil { + if existingMap, ok := value.(*ReplacementsContext); ok { + return ctx, existingMap + } + } + + newMap := 
&ReplacementsContext{} + ctx = context.WithValue(ctx, replacementsMapKey, newMap) + return ctx, newMap +} + +func GetReplacementsMap(ctx context.Context) *ReplacementsContext { + value := ctx.Value(replacementsMapKey) + if value != nil { + if existingMap, ok := value.(*ReplacementsContext); ok { + return existingMap + } + } + return nil +} + +func PrepareReplacements(t testutil.TestingT, r *ReplacementsContext, w *databricks.WorkspaceClient) { + t.Helper() + // in some clouds (gcp) w.Config.Host includes "https://" prefix in others it's really just a host (azure) + host := strings.TrimPrefix(strings.TrimPrefix(w.Config.Host, "http://"), "https://") + r.Set(host, "$DATABRICKS_HOST") + r.Set(w.Config.ClusterID, "$DATABRICKS_CLUSTER_ID") + r.Set(w.Config.WarehouseID, "$DATABRICKS_WAREHOUSE_ID") + r.Set(w.Config.ServerlessComputeID, "$DATABRICKS_SERVERLESS_COMPUTE_ID") + r.Set(w.Config.MetadataServiceURL, "$DATABRICKS_METADATA_SERVICE_URL") + r.Set(w.Config.AccountID, "$DATABRICKS_ACCOUNT_ID") + r.Set(w.Config.Token, "$DATABRICKS_TOKEN") + r.Set(w.Config.Username, "$DATABRICKS_USERNAME") + r.Set(w.Config.Password, "$DATABRICKS_PASSWORD") + r.Set(w.Config.Profile, "$DATABRICKS_CONFIG_PROFILE") + r.Set(w.Config.ConfigFile, "$DATABRICKS_CONFIG_FILE") + r.Set(w.Config.GoogleServiceAccount, "$DATABRICKS_GOOGLE_SERVICE_ACCOUNT") + r.Set(w.Config.GoogleCredentials, "$GOOGLE_CREDENTIALS") + r.Set(w.Config.AzureResourceID, "$DATABRICKS_AZURE_RESOURCE_ID") + r.Set(w.Config.AzureClientSecret, "$ARM_CLIENT_SECRET") + // r.Set(w.Config.AzureClientID, "$ARM_CLIENT_ID") + r.Set(w.Config.AzureClientID, "$USERNAME") + r.Set(w.Config.AzureTenantID, "$ARM_TENANT_ID") + r.Set(w.Config.ActionsIDTokenRequestURL, "$ACTIONS_ID_TOKEN_REQUEST_URL") + r.Set(w.Config.ActionsIDTokenRequestToken, "$ACTIONS_ID_TOKEN_REQUEST_TOKEN") + r.Set(w.Config.AzureEnvironment, "$ARM_ENVIRONMENT") + r.Set(w.Config.ClientID, "$DATABRICKS_CLIENT_ID") + r.Set(w.Config.ClientSecret, "$DATABRICKS_CLIENT_SECRET") + r.Set(w.Config.DatabricksCliPath, "$DATABRICKS_CLI_PATH") + // This is set to words like "path" that happen too frequently + // r.Set(w.Config.AuthType, "$DATABRICKS_AUTH_TYPE") +} + +func PrepareReplacementsUser(t testutil.TestingT, r *ReplacementsContext, u iam.User) { + t.Helper() + // There could be exact matches or overlap between different name fields, so sort them by length + // to ensure we match the largest one first and map them all to the same token + names := []string{ + u.DisplayName, + u.UserName, + iamutil.GetShortUserName(&u), + u.Name.FamilyName, + u.Name.GivenName, + } + if u.Name != nil { + names = append(names, u.Name.FamilyName) + names = append(names, u.Name.GivenName) + } + for _, val := range u.Emails { + names = append(names, val.Value) + } + stableSortReverseLength(names) + + for _, name := range names { + r.Set(name, "$USERNAME") + } + + for ind, val := range u.Groups { + r.Set(val.Value, fmt.Sprintf("$USER.Groups[%d]", ind)) + } + + r.Set(u.Id, "$USER.Id") + + for ind, val := range u.Roles { + r.Set(val.Value, fmt.Sprintf("$USER.Roles[%d]", ind)) + } +} + +func stableSortReverseLength(strs []string) { + slices.SortStableFunc(strs, func(a, b string) int { + return len(b) - len(a) + }) +} + +func NormalizeNewlines(input string) string { + output := strings.ReplaceAll(input, "\r\n", "\n") + return strings.ReplaceAll(output, "\r", "\n") +} diff --git a/libs/testdiff/golden_test.go b/libs/testdiff/golden_test.go new file mode 100644 index 000000000..0fc32be21 --- /dev/null +++ b/libs/testdiff/golden_test.go 
@@ -0,0 +1,13 @@ +package testdiff + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSort(t *testing.T) { + input := []string{"a", "bc", "cd"} + stableSortReverseLength(input) + assert.Equal(t, []string{"bc", "cd", "a"}, input) +} diff --git a/libs/testdiff/testdiff.go b/libs/testdiff/testdiff.go new file mode 100644 index 000000000..fef1d5ae2 --- /dev/null +++ b/libs/testdiff/testdiff.go @@ -0,0 +1,94 @@ +package testdiff + +import ( + "fmt" + "strings" + + "github.com/databricks/cli/internal/testutil" + "github.com/hexops/gotextdiff" + "github.com/hexops/gotextdiff/myers" + "github.com/hexops/gotextdiff/span" + "github.com/stretchr/testify/assert" + "github.com/wI2L/jsondiff" +) + +func UnifiedDiff(filename1, filename2, s1, s2 string) string { + edits := myers.ComputeEdits(span.URIFromPath(filename1), s1, s2) + return fmt.Sprint(gotextdiff.ToUnified(filename1, filename2, s1, edits)) +} + +func AssertEqualTexts(t testutil.TestingT, filename1, filename2, expected, out string) { + t.Helper() + if len(out) < 1000 && len(expected) < 1000 { + // This shows full strings + diff which could be useful when debugging newlines + assert.Equal(t, expected, out, "%s vs %s", filename1, filename2) + } else { + // only show diff for large texts + diff := UnifiedDiff(filename1, filename2, expected, out) + if diff != "" { + t.Errorf("Diff:\n" + diff) + } + } +} + +func AssertEqualJQ(t testutil.TestingT, expectedName, outName, expected, out string, ignorePaths []string) { + t.Helper() + patch, err := jsondiff.CompareJSON([]byte(expected), []byte(out)) + if err != nil { + t.Logf("CompareJSON error for %s vs %s: %s (fallback to textual comparison)", outName, expectedName, err) + AssertEqualTexts(t, expectedName, outName, expected, out) + } else { + diff := UnifiedDiff(expectedName, outName, expected, out) + t.Logf("Diff:\n%s", diff) + allowedDiffs := []string{} + erroredDiffs := []string{} + for _, op := range patch { + if allowDifference(ignorePaths, op) { + allowedDiffs = append(allowedDiffs, fmt.Sprintf("%7s %s %v old=%v", op.Type, op.Path, op.Value, op.OldValue)) + } else { + erroredDiffs = append(erroredDiffs, fmt.Sprintf("%7s %s %v old=%v", op.Type, op.Path, op.Value, op.OldValue)) + } + } + if len(allowedDiffs) > 0 { + t.Logf("Allowed differences between %s and %s:\n ==> %s", expectedName, outName, strings.Join(allowedDiffs, "\n ==> ")) + } + if len(erroredDiffs) > 0 { + t.Errorf("Unexpected differences between %s and %s:\n ==> %s", expectedName, outName, strings.Join(erroredDiffs, "\n ==> ")) + } + } +} + +func allowDifference(ignorePaths []string, op jsondiff.Operation) bool { + if matchesPrefixes(ignorePaths, op.Path) { + return true + } + if op.Type == "replace" && almostSameStrings(op.OldValue, op.Value) { + return true + } + return false +} + +// compare strings and ignore forward vs backward slashes +func almostSameStrings(v1, v2 any) bool { + s1, ok := v1.(string) + if !ok { + return false + } + s2, ok := v2.(string) + if !ok { + return false + } + return strings.ReplaceAll(s1, "\\", "/") == strings.ReplaceAll(s2, "\\", "/") +} + +func matchesPrefixes(prefixes []string, path string) bool { + for _, p := range prefixes { + if p == path { + return true + } + if strings.HasPrefix(path, p+"/") { + return true + } + } + return false +} diff --git a/libs/testdiff/testdiff_test.go b/libs/testdiff/testdiff_test.go new file mode 100644 index 000000000..869fee78a --- /dev/null +++ b/libs/testdiff/testdiff_test.go @@ -0,0 +1,20 @@ +package testdiff + +import ( + "testing" + + 
"github.com/stretchr/testify/assert" +) + +func TestDiff(t *testing.T) { + assert.Equal(t, "", UnifiedDiff("a", "b", "", "")) + assert.Equal(t, "", UnifiedDiff("a", "b", "abc", "abc")) + assert.Equal(t, "--- a\n+++ b\n@@ -1 +1,2 @@\n abc\n+123\n", UnifiedDiff("a", "b", "abc\n", "abc\n123\n")) +} + +func TestMatchesPrefixes(t *testing.T) { + assert.False(t, matchesPrefixes([]string{}, "")) + assert.False(t, matchesPrefixes([]string{"/hello", "/hello/world"}, "")) + assert.True(t, matchesPrefixes([]string{"/hello", "/a/b"}, "/hello")) + assert.True(t, matchesPrefixes([]string{"/hello", "/a/b"}, "/a/b/c")) +} diff --git a/libs/textutil/textutil_test.go b/libs/textutil/textutil_test.go index f6834a1ef..b9268c98b 100644 --- a/libs/textutil/textutil_test.go +++ b/libs/textutil/textutil_test.go @@ -50,7 +50,8 @@ func TestNormalizeString(t *testing.T) { { input: ".test//test..test", expected: "test_test_test", - }} + }, + } for _, c := range cases { assert.Equal(t, c.expected, NormalizeString(c.input)) diff --git a/libs/vfs/filer_test.go b/libs/vfs/filer_test.go index ee1397521..6987c288e 100644 --- a/libs/vfs/filer_test.go +++ b/libs/vfs/filer_test.go @@ -2,7 +2,6 @@ package vfs import ( "context" - "errors" "io/fs" "os" "path/filepath" @@ -42,7 +41,7 @@ func TestFilerPath(t *testing.T) { // Open non-existent file. _, err = p.Open("doesntexist_test.go") - assert.True(t, errors.Is(err, fs.ErrNotExist)) + assert.ErrorIs(t, err, fs.ErrNotExist) // Stat self. s, err = p.Stat("filer_test.go") @@ -52,7 +51,7 @@ func TestFilerPath(t *testing.T) { // Stat non-existent file. _, err = p.Stat("doesntexist_test.go") - assert.True(t, errors.Is(err, fs.ErrNotExist)) + assert.ErrorIs(t, err, fs.ErrNotExist) // ReadDir self. entries, err := p.ReadDir(".") @@ -61,7 +60,7 @@ func TestFilerPath(t *testing.T) { // ReadDir non-existent directory. _, err = p.ReadDir("doesntexist") - assert.True(t, errors.Is(err, fs.ErrNotExist)) + assert.ErrorIs(t, err, fs.ErrNotExist) // ReadFile self. buf, err = p.ReadFile("filer_test.go") @@ -70,7 +69,7 @@ func TestFilerPath(t *testing.T) { // ReadFile non-existent file. _, err = p.ReadFile("doesntexist_test.go") - assert.True(t, errors.Is(err, fs.ErrNotExist)) + assert.ErrorIs(t, err, fs.ErrNotExist) // Parent self. pp := p.Parent() diff --git a/libs/vfs/leaf.go b/libs/vfs/leaf.go deleted file mode 100644 index 8c11f9039..000000000 --- a/libs/vfs/leaf.go +++ /dev/null @@ -1,29 +0,0 @@ -package vfs - -import ( - "errors" - "io/fs" -) - -// FindLeafInTree returns the first path that holds `name`, -// traversing up to the root of the filesystem, starting at `p`. -func FindLeafInTree(p Path, name string) (Path, error) { - for p != nil { - _, err := fs.Stat(p, name) - - // No error means we found the leaf in p. - if err == nil { - return p, nil - } - - // ErrNotExist means we continue traversal up the tree. - if errors.Is(err, fs.ErrNotExist) { - p = p.Parent() - continue - } - - return nil, err - } - - return nil, fs.ErrNotExist -} diff --git a/libs/vfs/leaf_test.go b/libs/vfs/leaf_test.go deleted file mode 100644 index da9412ec0..000000000 --- a/libs/vfs/leaf_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package vfs - -import ( - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestFindLeafInTree(t *testing.T) { - wd, err := os.Getwd() - require.NoError(t, err) - - root := filepath.Join(wd, "..", "..") - - // Find from working directory should work. 
- { - out, err := FindLeafInTree(MustNew(wd), ".git") - assert.NoError(t, err) - assert.Equal(t, root, out.Native()) - } - - // Find from project root itself should work. - { - out, err := FindLeafInTree(MustNew(root), ".git") - assert.NoError(t, err) - assert.Equal(t, root, out.Native()) - } - - // Find for something that doesn't exist should work. - { - out, err := FindLeafInTree(MustNew(root), "this-leaf-doesnt-exist-anywhere") - assert.ErrorIs(t, err, os.ErrNotExist) - assert.Equal(t, nil, out) - } -}
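
The bulk of the test churn above applies testifylint-style rewrites: assert.Len(t, x, 0) becomes assert.Empty(t, x), require.Equal(t, len(x), n) becomes require.Len(t, x, n), expected values move ahead of actual ones, and boolean errors.Is checks become assert.ErrorIs. A minimal sketch of the preferred assertions; the test name and data are illustrative and not taken from the patch:

    package example

    import (
    	"io/fs"
    	"os"
    	"testing"

    	"github.com/stretchr/testify/assert"
    	"github.com/stretchr/testify/require"
    )

    func TestAssertionStyle(t *testing.T) {
    	change := map[string][]string{
    		"put":    {"hello.txt", "world.txt"},
    		"delete": {},
    	}

    	// Prefer Empty over Len(..., 0): the failure message prints the offending value.
    	assert.Empty(t, change["delete"])

    	// Prefer Len over Equal(t, len(x), n), and keep the expected value first.
    	require.Len(t, change["put"], 2)
    	assert.Equal(t, "hello.txt", change["put"][0])

    	// Prefer ErrorIs over assert.True(t, errors.Is(err, ...)).
    	_, err := os.Open("this-file-does-not-exist")
    	assert.ErrorIs(t, err, fs.ErrNotExist)
    }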
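
Several hunks swap fmt.Errorf for errors.New when the message contains no format verbs, and replace fmt.Sprintf calls that only join two strings with plain concatenation. A short sketch of the convention, using a hypothetical validateKey helper rather than the real tag validation code:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // validateKey follows the post-cleanup convention: errors.New for fixed
    // messages, fmt.Errorf only when values are actually interpolated.
    func validateKey(key string, maxLen int) error {
    	if key == "" {
    		return errors.New("key must not be empty")
    	}
    	if len(key) > maxLen {
    		return fmt.Errorf("key length %d exceeds maximum of %d", len(key), maxLen)
    	}
    	return nil
    }

    func main() {
    	fmt.Println(validateKey("", 63)) // key must not be empty
    }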
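
The renderer change in libs/template/renderer.go ORs the owner-write bit (0o200) into whatever mode a template file carries, so a read-only source file never yields an uneditable generated file; the surrounding hunks also rewrite octal literals in the 0o644 style. A tiny sketch of the bit arithmetic, with an assumed read-only input mode:

    package main

    import (
    	"fmt"
    	"io/fs"
    )

    func main() {
    	// A template may ship a read-only file (0o444). ORing in the owner-write
    	// bit yields 0o644, so the generated copy stays editable by its owner.
    	perm := fs.FileMode(0o444)
    	perm |= 0o200

    	fmt.Println(perm.Perm()) // -rw-r--r--
    }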
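
The new libs/testdiff package supports golden-file comparisons: output is normalized through a ReplacementsContext stored on the context, then diffed against a file on disk, and the -update flag registered in golden.go overwrites the expectation on mismatch. A hedged sketch of how a test might drive it; the package name, command output, host value, and golden-file path here are made up:

    package mylib_test

    import (
    	"context"
    	"testing"

    	"github.com/databricks/cli/libs/testdiff"
    )

    func TestCommandOutput(t *testing.T) {
    	ctx, repls := testdiff.WithReplacementsMap(context.Background())

    	// Map environment-specific values to stable tokens before comparison.
    	repls.Set("adb-1234567890.11.azuredatabricks.net", "$DATABRICKS_HOST")

    	// Stand-in for real command output captured by the test.
    	out := "Deployed bundle to https://adb-1234567890.11.azuredatabricks.net/\n"

    	// Diffs the normalized output against the golden file; when run with
    	// -update, a mismatch also rewrites testdata/output.txt.
    	testdiff.AssertOutput(t, ctx, out, "output", "testdata/output.txt")
    }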