diff --git a/.codegen.json b/.codegen.json index 73ab8c2a4..735e1ee31 100644 --- a/.codegen.json +++ b/.codegen.json @@ -11,7 +11,7 @@ "required": ["go"], "post_generate": [ "go test -timeout 240s -run TestConsistentDatabricksSdkVersion github.com/databricks/cli/internal/build", - "go run ./bundle/internal/schema/*.go ./bundle/schema/jsonschema.json", + "make schema", "echo 'bundle/internal/tf/schema/\\*.go linguist-generated=true' >> ./.gitattributes", "echo 'go.sum linguist-generated=true' >> ./.gitattributes", "echo 'bundle/schema/jsonschema.json linguist-generated=true' >> ./.gitattributes" diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index a2ba58aa5..8622b29ca 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -f2385add116e3716c8a90a0b68e204deb40f996c \ No newline at end of file +a6a317df8327c9b1e5cb59a03a42ffa2aabeef6d \ No newline at end of file diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index ef7977e1b..ee2c7b0fd 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -411,5 +411,5 @@ func new{{.PascalName}}() *cobra.Command { {{- define "request-body-obj" -}} {{- $method := .Method -}} {{- $field := .Field -}} - {{$method.CamelName}}Req{{ if (and $method.RequestBodyField (not $field.IsPath)) }}.{{$method.RequestBodyField.PascalName}}{{end}}.{{$field.PascalName}} + {{$method.CamelName}}Req{{ if (and $method.RequestBodyField (and (not $field.IsPath) (not $field.IsQuery))) }}.{{$method.RequestBodyField.PascalName}}{{end}}.{{$field.PascalName}} {{- end -}} diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 000000000..6304b3604 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,8 @@ +# Enable gofumpt and goimports in golangci-lint (#1999) +2e018cfaec200a02ee2bd5b389e7da3c6f15f460 + +# Enable errcheck everywhere and fix or silent remaining issues (#1987) +8d5351c1c3d7befda4baae5d6adb99367aa50b3c + +# Add error checking in tests and enable errcheck there (#1980) +1b2be1b2cb4b7909df2a8ad4cb6a0f43e8fcf0c6 diff --git a/.gitattributes b/.gitattributes index 2755c02d7..0a8ddf3cb 100755 --- a/.gitattributes +++ b/.gitattributes @@ -8,6 +8,7 @@ cmd/account/custom-app-integration/custom-app-integration.go linguist-generated= cmd/account/disable-legacy-features/disable-legacy-features.go linguist-generated=true cmd/account/encryption-keys/encryption-keys.go linguist-generated=true cmd/account/esm-enablement-account/esm-enablement-account.go linguist-generated=true +cmd/account/federation-policy/federation-policy.go linguist-generated=true cmd/account/groups/groups.go linguist-generated=true cmd/account/ip-access-lists/ip-access-lists.go linguist-generated=true cmd/account/log-delivery/log-delivery.go linguist-generated=true @@ -19,6 +20,7 @@ cmd/account/o-auth-published-apps/o-auth-published-apps.go linguist-generated=tr cmd/account/personal-compute/personal-compute.go linguist-generated=true cmd/account/private-access/private-access.go linguist-generated=true cmd/account/published-app-integration/published-app-integration.go linguist-generated=true +cmd/account/service-principal-federation-policy/service-principal-federation-policy.go linguist-generated=true cmd/account/service-principal-secrets/service-principal-secrets.go linguist-generated=true cmd/account/service-principals/service-principals.go linguist-generated=true cmd/account/settings/settings.go linguist-generated=true @@ -37,6 +39,9 @@ cmd/workspace/apps/apps.go linguist-generated=true 
cmd/workspace/artifact-allowlists/artifact-allowlists.go linguist-generated=true cmd/workspace/automatic-cluster-update/automatic-cluster-update.go linguist-generated=true cmd/workspace/catalogs/catalogs.go linguist-generated=true +cmd/workspace/clean-room-assets/clean-room-assets.go linguist-generated=true +cmd/workspace/clean-room-task-runs/clean-room-task-runs.go linguist-generated=true +cmd/workspace/clean-rooms/clean-rooms.go linguist-generated=true cmd/workspace/cluster-policies/cluster-policies.go linguist-generated=true cmd/workspace/clusters/clusters.go linguist-generated=true cmd/workspace/cmd.go linguist-generated=true diff --git a/.github/workflows/integration-approve.yml b/.github/workflows/integration-approve.yml new file mode 100644 index 000000000..4bdeb62a3 --- /dev/null +++ b/.github/workflows/integration-approve.yml @@ -0,0 +1,32 @@ +name: integration-approve + +on: + merge_group: + +jobs: + # Trigger for merge groups. + # + # Statuses and checks apply to specific commits (by hash). + # Enforcement of required checks is done both at the PR level and the merge queue level. + # In case of multiple commits in a single PR, the hash of the squashed commit + # will not match the one for the latest (approved) commit in the PR. + # + # We auto approve the check for the merge queue for two reasons: + # + # * Queue times out due to duration of tests. + # * Avoid running integration tests twice, since it was already run at the tip of the branch before squashing. + # + trigger: + runs-on: ubuntu-latest + + steps: + - name: Auto-approve squashed commit + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + shell: bash + run: | + gh api -X POST -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + /repos/${{ github.repository }}/statuses/${{ github.sha }} \ + -f 'state=success' \ + -f 'context=Integration Tests Check' diff --git a/.github/workflows/integration-main.yml b/.github/workflows/integration-main.yml new file mode 100644 index 000000000..064e439cf --- /dev/null +++ b/.github/workflows/integration-main.yml @@ -0,0 +1,33 @@ +name: integration-main + +on: + push: + branches: + - main + +jobs: + # Trigger for pushes to the main branch. + # + # This workflow triggers the integration test workflow in a different repository. + # It requires secrets from the "test-trigger-is" environment, which are only available to authorized users. 
+ trigger: + runs-on: ubuntu-latest + environment: "test-trigger-is" + + steps: + - name: Generate GitHub App Token + id: generate-token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }} + private-key: ${{ secrets.DECO_WORKFLOW_TRIGGER_PRIVATE_KEY }} + owner: ${{ secrets.ORG_NAME }} + repositories: ${{secrets.REPO_NAME}} + + - name: Trigger Workflow in Another Repo + env: + GH_TOKEN: ${{ steps.generate-token.outputs.token }} + run: | + gh workflow run cli-isolated-nightly.yml -R ${{ secrets.ORG_NAME }}/${{secrets.REPO_NAME}} \ + --ref main \ + -f commit_sha=${{ github.event.after }} diff --git a/.github/workflows/integration-pr.yml b/.github/workflows/integration-pr.yml new file mode 100644 index 000000000..bf2dcd8bc --- /dev/null +++ b/.github/workflows/integration-pr.yml @@ -0,0 +1,56 @@ +name: integration-pr + +on: + pull_request: + types: [opened, synchronize] + +jobs: + check-token: + runs-on: ubuntu-latest + environment: "test-trigger-is" + + outputs: + has_token: ${{ steps.set-token-status.outputs.has_token }} + + steps: + - name: Check if DECO_WORKFLOW_TRIGGER_APP_ID is set + id: set-token-status + run: | + if [ -z "${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}" ]; then + echo "DECO_WORKFLOW_TRIGGER_APP_ID is empty. User has no access to secrets." + echo "::set-output name=has_token::false" + else + echo "DECO_WORKFLOW_TRIGGER_APP_ID is set. User has access to secrets." + echo "::set-output name=has_token::true" + fi + + # Trigger for pull requests. + # + # This workflow triggers the integration test workflow in a different repository. + # It requires secrets from the "test-trigger-is" environment, which are only available to authorized users. + # It depends on the "check-token" workflow to confirm access to this environment to avoid failures. + trigger: + runs-on: ubuntu-latest + environment: "test-trigger-is" + + if: needs.check-token.outputs.has_token == 'true' + needs: check-token + + steps: + - name: Generate GitHub App Token + id: generate-token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }} + private-key: ${{ secrets.DECO_WORKFLOW_TRIGGER_PRIVATE_KEY }} + owner: ${{ secrets.ORG_NAME }} + repositories: ${{secrets.REPO_NAME}} + + - name: Trigger Workflow in Another Repo + env: + GH_TOKEN: ${{ steps.generate-token.outputs.token }} + run: | + gh workflow run cli-isolated-pr.yml -R ${{ secrets.ORG_NAME }}/${{secrets.REPO_NAME}} \ + --ref main \ + -f pull_request_number=${{ github.event.pull_request.number }} \ + -f commit_sha=${{ github.event.pull_request.head.sha }} diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml deleted file mode 100644 index d56728c28..000000000 --- a/.github/workflows/integration-tests.yml +++ /dev/null @@ -1,78 +0,0 @@ -name: integration - -on: - - pull_request: - types: [opened, synchronize] - - merge_group: - - -jobs: - check-token: - runs-on: ubuntu-latest - environment: "test-trigger-is" - outputs: - has_token: ${{ steps.set-token-status.outputs.has_token }} - steps: - - name: Check if DECO_WORKFLOW_TRIGGER_APP_ID is set - id: set-token-status - run: | - if [ -z "${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}" ]; then - echo "DECO_WORKFLOW_TRIGGER_APP_ID is empty. User has no access to secrets." - echo "::set-output name=has_token::false" - else - echo "DECO_WORKFLOW_TRIGGER_APP_ID is set. User has access to secrets." 
- echo "::set-output name=has_token::true" - fi - - trigger-tests: - runs-on: ubuntu-latest - needs: check-token - if: github.event_name == 'pull_request' && needs.check-token.outputs.has_token == 'true' - environment: "test-trigger-is" - - steps: - - uses: actions/checkout@v4 - - - name: Generate GitHub App Token - id: generate-token - uses: actions/create-github-app-token@v1 - with: - app-id: ${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }} - private-key: ${{ secrets.DECO_WORKFLOW_TRIGGER_PRIVATE_KEY }} - owner: ${{ secrets.ORG_NAME }} - repositories: ${{secrets.REPO_NAME}} - - - name: Trigger Workflow in Another Repo - env: - GH_TOKEN: ${{ steps.generate-token.outputs.token }} - run: | - gh workflow run cli-isolated-pr.yml -R ${{ secrets.ORG_NAME }}/${{secrets.REPO_NAME}} \ - --ref main \ - -f pull_request_number=${{ github.event.pull_request.number }} \ - -f commit_sha=${{ github.event.pull_request.head.sha }} - - - - # Statuses and checks apply to specific commits (by hash). - # Enforcement of required checks is done both at the PR level and the merge queue level. - # In case of multiple commits in a single PR, the hash of the squashed commit - # will not match the one for the latest (approved) commit in the PR. - # We auto approve the check for the merge queue for two reasons: - # * Queue times out due to duration of tests. - # * Avoid running integration tests twice, since it was already run at the tip of the branch before squashing. - auto-approve: - if: github.event_name == 'merge_group' - runs-on: ubuntu-latest - steps: - - name: Mark Check - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - shell: bash - run: | - gh api -X POST -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - /repos/${{ github.repository }}/statuses/${{ github.sha }} \ - -f 'state=success' \ - -f 'context=Integration Tests Check' diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index ebb3e75d4..a51927594 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -33,18 +33,21 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.23.2 + go-version: 1.23.4 - name: Setup Python uses: actions/setup-python@v5 with: python-version: '3.9' + - name: Install uv + uses: astral-sh/setup-uv@v4 + - name: Set go env run: | echo "GOPATH=$(go env GOPATH)" >> $GITHUB_ENV echo "$(go env GOPATH)/bin" >> $GITHUB_PATH - go install gotest.tools/gotestsum@latest + go install gotest.tools/gotestsum@v1.12.0 - name: Pull external libraries run: | @@ -54,41 +57,6 @@ jobs: - name: Run tests run: make testonly - - name: Publish test coverage - uses: codecov/codecov-action@v4 - - fmt: - runs-on: ubuntu-latest - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Setup Go - uses: actions/setup-go@v5 - with: - go-version: 1.23.2 - - # No need to download cached dependencies when running gofmt. - cache: false - - - name: Install goimports - run: | - go install golang.org/x/tools/cmd/goimports@latest - - - name: Run make fmt - run: | - make fmt - - - name: Run go mod tidy - run: | - go mod tidy - - - name: Fail on differences - run: | - # Exit with status code 1 if there are differences (i.e. 
unformatted files) - git diff --exit-code - golangci: name: lint runs-on: ubuntu-latest @@ -96,7 +64,14 @@ jobs: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: - go-version: 1.23.2 + go-version: 1.23.4 + - name: Run go mod tidy + run: | + go mod tidy + - name: Fail on differences + run: | + # Exit with status code 1 if there are differences (i.e. unformatted files) + git diff --exit-code - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: @@ -113,7 +88,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.23.2 + go-version: 1.23.4 # Github repo: https://github.com/ajv-validator/ajv-cli - name: Install ajv-cli @@ -124,14 +99,19 @@ jobs: # By default the ajv-cli runs in strict mode which will fail if the schema # itself is not valid. Strict mode is more strict than the JSON schema # specification. See for details: https://ajv.js.org/options.html#strict-mode-options + # The ajv-cli is configured to use the markdownDescription keyword which is not part of the JSON schema specification, + # but is used in editors like VSCode to render markdown in the description field - name: Validate bundle schema run: | go run main.go bundle schema > schema.json + # Add markdownDescription keyword to ajv + echo "module.exports=function(a){a.addKeyword('markdownDescription')}" >> keywords.js + for file in ./bundle/internal/schema/testdata/pass/*.yml; do - ajv test -s schema.json -d $file --valid + ajv test -s schema.json -d $file --valid -c=./keywords.js done for file in ./bundle/internal/schema/testdata/fail/*.yml; do - ajv test -s schema.json -d $file --invalid + ajv test -s schema.json -d $file --invalid -c=./keywords.js done diff --git a/.github/workflows/release-snapshot.yml b/.github/workflows/release-snapshot.yml index 4a7597dc0..7ef8b43c9 100644 --- a/.github/workflows/release-snapshot.yml +++ b/.github/workflows/release-snapshot.yml @@ -31,7 +31,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.23.2 + go-version: 1.23.4 # The default cache key for this action considers only the `go.sum` file. # We include .goreleaser.yaml here to differentiate from the cache used by the push action diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index e8f59f9b8..e4a253531 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -22,7 +22,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.23.2 + go-version: 1.23.4 # The default cache key for this action considers only the `go.sum` file. 
# We include .goreleaser.yaml here to differentiate from the cache used by the push action diff --git a/.golangci.yaml b/.golangci.yaml index 82e4d9848..9e69e5146 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -2,21 +2,37 @@ linters: disable-all: true enable: - bodyclose - # errcheck and govet are part of default setup and should be included but give too many errors now - # once errors are fixed, they should be enabled here: - #- errcheck + - errcheck - gosimple - #- govet + - govet - ineffassign - staticcheck - unused - gofmt + - gofumpt + - goimports linters-settings: + govet: + enable-all: true + disable: + - fieldalignment + - shadow gofmt: rewrite-rules: - pattern: 'a[b:len(a)]' replacement: 'a[b:]' - pattern: 'interface{}' replacement: 'any' + errcheck: + exclude-functions: + - (*github.com/spf13/cobra.Command).RegisterFlagCompletionFunc + - (*github.com/spf13/cobra.Command).MarkFlagRequired + - (*github.com/spf13/pflag.FlagSet).MarkDeprecated + - (*github.com/spf13/pflag.FlagSet).MarkHidden + gofumpt: + module-path: github.com/databricks/cli + extra-rules: true + #goimports: + # local-prefixes: github.com/databricks/cli issues: exclude-dirs-use-default: false # recommended by docs https://golangci-lint.run/usage/false-positives/ diff --git a/.vscode/settings.json b/.vscode/settings.json index 853e84de8..f8b04f126 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -7,11 +7,14 @@ "go.lintFlags": [ "--fast" ], + "go.useLanguageServer": true, + "gopls": { + "formatting.gofumpt": true + }, "files.trimTrailingWhitespace": true, "files.insertFinalNewline": true, "files.trimFinalNewlines": true, "python.envFile": "${workspaceRoot}/.env", - "databricks.python.envFile": "${workspaceFolder}/.env", "python.analysis.stubPath": ".vscode", "jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])", "jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------" diff --git a/CHANGELOG.md b/CHANGELOG.md index 56207686a..6bdb0795b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,28 @@ # Version changelog +## [Release] Release v0.237.0 + +Bundles: + * Allow overriding compute for non-development mode targets ([#1899](https://github.com/databricks/cli/pull/1899)). + * Show an error when using a cluster override with 'mode: production' ([#1994](https://github.com/databricks/cli/pull/1994)). + +API Changes: + * Added `databricks account federation-policy` command group. + * Added `databricks account service-principal-federation-policy` command group. + * Added `databricks aibi-dashboard-embedding-access-policy delete` command. + * Added `databricks aibi-dashboard-embedding-approved-domains delete` command. + +OpenAPI commit a6a317df8327c9b1e5cb59a03a42ffa2aabeef6d (2024-12-16) +Dependency updates: + * Upgrade TF provider to 1.62.0 ([#2030](https://github.com/databricks/cli/pull/2030)). + * Upgrade Go SDK to 0.54.0 ([#2029](https://github.com/databricks/cli/pull/2029)). + * Bump TF codegen dependencies to latest ([#1961](https://github.com/databricks/cli/pull/1961)). + * Bump golang.org/x/term from 0.26.0 to 0.27.0 ([#1983](https://github.com/databricks/cli/pull/1983)). + * Bump golang.org/x/sync from 0.9.0 to 0.10.0 ([#1984](https://github.com/databricks/cli/pull/1984)). + * Bump github.com/databricks/databricks-sdk-go from 0.52.0 to 0.53.0 ([#1985](https://github.com/databricks/cli/pull/1985)). 
+ * Bump golang.org/x/crypto from 0.24.0 to 0.31.0 ([#2006](https://github.com/databricks/cli/pull/2006)). + * Bump golang.org/x/crypto from 0.30.0 to 0.31.0 in /bundle/internal/tf/codegen ([#2005](https://github.com/databricks/cli/pull/2005)). + ## [Release] Release v0.236.0 **New features for Databricks Asset Bundles:** diff --git a/Makefile b/Makefile index bc1c42cfa..f8e7834a5 100644 --- a/Makefile +++ b/Makefile @@ -1,19 +1,13 @@ default: build -fmt: - @echo "✓ Formatting source code with goimports ..." - @goimports -w $(shell find . -type f -name '*.go' -not -path "./vendor/*") - @echo "✓ Formatting source code with gofmt ..." - @gofmt -w $(shell find . -type f -name '*.go' -not -path "./vendor/*") - lint: vendor + @echo "✓ Linting source code with https://golangci-lint.run/ (with --fix)..." + @golangci-lint run --fix ./... + +lintcheck: vendor @echo "✓ Linting source code with https://golangci-lint.run/ ..." @golangci-lint run ./... -lintfix: vendor - @echo "✓ Linting source code with 'golangci-lint run --fix' ..." - @golangci-lint run --fix ./... - test: lint testonly testonly: @@ -35,8 +29,17 @@ snapshot: vendor: @echo "✓ Filling vendor folder with library code ..." @go mod vendor + +schema: + @echo "✓ Generating json-schema ..." + @go run ./bundle/internal/schema ./bundle/internal/schema ./bundle/schema/jsonschema.json + +INTEGRATION = gotestsum --format github-actions --rerun-fails --jsonfile output.json --packages "./integration/..." -- -parallel 4 -timeout=2h integration: - gotestsum --format github-actions --rerun-fails --jsonfile output.json --packages "./internal/..." -- -run "TestAcc.*" -parallel 4 -timeout=2h + $(INTEGRATION) -.PHONY: fmt lint lintfix test testonly coverage build snapshot vendor integration +integration-short: + $(INTEGRATION) -short + +.PHONY: lint lintcheck test testonly coverage build snapshot vendor schema integration integration-short diff --git a/NOTICE b/NOTICE index d8306510e..f6b59e0b0 100644 --- a/NOTICE +++ b/NOTICE @@ -73,10 +73,6 @@ fatih/color - https://github.com/fatih/color Copyright (c) 2013 Fatih Arslan License - https://github.com/fatih/color/blob/main/LICENSE.md -ghodss/yaml - https://github.com/ghodss/yaml -Copyright (c) 2014 Sam Ghods -License - https://github.com/ghodss/yaml/blob/master/LICENSE - Masterminds/semver - https://github.com/Masterminds/semver Copyright (C) 2014-2019, Matt Butcher and Matt Farina License - https://github.com/Masterminds/semver/blob/master/LICENSE.txt @@ -101,3 +97,11 @@ License - https://github.com/stretchr/testify/blob/master/LICENSE whilp/git-urls - https://github.com/whilp/git-urls Copyright (c) 2020 Will Maier License - https://github.com/whilp/git-urls/blob/master/LICENSE + +github.com/wI2L/jsondiff v0.6.1 +Copyright (c) 2020-2024 William Poussier +License - https://github.com/wI2L/jsondiff/blob/master/LICENSE + +https://github.com/hexops/gotextdiff +Copyright (c) 2009 The Go Authors. All rights reserved. 
+License - https://github.com/hexops/gotextdiff/blob/main/LICENSE diff --git a/bundle/artifacts/all.go b/bundle/artifacts/all.go index 305193e2e..768ccdfe3 100644 --- a/bundle/artifacts/all.go +++ b/bundle/artifacts/all.go @@ -3,7 +3,6 @@ package artifacts import ( "context" "fmt" - "slices" "github.com/databricks/cli/bundle" diff --git a/bundle/artifacts/autodetect.go b/bundle/artifacts/autodetect.go index 569a480f0..c8d235616 100644 --- a/bundle/artifacts/autodetect.go +++ b/bundle/artifacts/autodetect.go @@ -13,8 +13,7 @@ func DetectPackages() bundle.Mutator { return &autodetect{} } -type autodetect struct { -} +type autodetect struct{} func (m *autodetect) Name() string { return "artifacts.DetectPackages" diff --git a/bundle/artifacts/expand_globs.go b/bundle/artifacts/expand_globs.go index cdf3d4590..7d44db0be 100644 --- a/bundle/artifacts/expand_globs.go +++ b/bundle/artifacts/expand_globs.go @@ -96,7 +96,6 @@ func (m *expandGlobs) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnost // Set the expanded globs back into the configuration. return dyn.SetByPath(v, base, dyn.V(output)) }) - if err != nil { return diag.FromErr(err) } diff --git a/bundle/artifacts/whl/autodetect.go b/bundle/artifacts/whl/autodetect.go index 88dc742c1..202ea12bc 100644 --- a/bundle/artifacts/whl/autodetect.go +++ b/bundle/artifacts/whl/autodetect.go @@ -15,8 +15,7 @@ import ( "github.com/databricks/cli/libs/log" ) -type detectPkg struct { -} +type detectPkg struct{} func DetectPackage() bundle.Mutator { return &detectPkg{} @@ -42,7 +41,7 @@ func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic return nil } - log.Infof(ctx, fmt.Sprintf("Found Python wheel project at %s", b.BundleRootPath)) + log.Infof(ctx, "Found Python wheel project at %s", b.BundleRootPath) module := extractModuleName(setupPy) if b.Config.Artifacts == nil { diff --git a/bundle/artifacts/whl/infer.go b/bundle/artifacts/whl/infer.go index cb727de0e..604bfc449 100644 --- a/bundle/artifacts/whl/infer.go +++ b/bundle/artifacts/whl/infer.go @@ -16,12 +16,6 @@ type infer struct { func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { artifact := b.Config.Artifacts[m.name] - // TODO use python.DetectVEnvExecutable once bundle has a way to specify venv path - py, err := python.DetectExecutable(ctx) - if err != nil { - return diag.FromErr(err) - } - // Note: using --build-number (build tag) flag does not help with re-installing // libraries on all-purpose clusters. The reason is that `pip` ignoring build tag // when upgrading the library and only look at wheel version. @@ -36,7 +30,9 @@ func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // version=datetime.datetime.utcnow().strftime("%Y%m%d.%H%M%S"), // ... //) - artifact.BuildCommand = fmt.Sprintf(`"%s" setup.py bdist_wheel`, py) + + py := python.GetExecutable() + artifact.BuildCommand = fmt.Sprintf(`%s setup.py bdist_wheel`, py) return nil } diff --git a/bundle/bundle.go b/bundle/bundle.go index 76c87c24c..573bcef2f 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -186,7 +186,7 @@ func (b *Bundle) CacheDir(ctx context.Context, paths ...string) (string, error) // Make directory if it doesn't exist yet. dir := filepath.Join(parts...) 
- err := os.MkdirAll(dir, 0700) + err := os.MkdirAll(dir, 0o700) if err != nil { return "", err } @@ -203,7 +203,7 @@ func (b *Bundle) InternalDir(ctx context.Context) (string, error) { } dir := filepath.Join(cacheDir, internalFolder) - err = os.MkdirAll(dir, 0700) + err = os.MkdirAll(dir, 0o700) if err != nil { return dir, err } diff --git a/bundle/config/experimental.go b/bundle/config/experimental.go index 061bbdae0..4c787168f 100644 --- a/bundle/config/experimental.go +++ b/bundle/config/experimental.go @@ -47,8 +47,10 @@ type PyDABs struct { Import []string `json:"import,omitempty"` } -type Command string -type ScriptHook string +type ( + Command string + ScriptHook string +) // These hook names are subject to change and currently experimental const ( diff --git a/bundle/config/generate/job.go b/bundle/config/generate/job.go index 6cd7c1b32..0cdcbf3ad 100644 --- a/bundle/config/generate/job.go +++ b/bundle/config/generate/job.go @@ -6,8 +6,10 @@ import ( "github.com/databricks/databricks-sdk-go/service/jobs" ) -var jobOrder = yamlsaver.NewOrder([]string{"name", "job_clusters", "compute", "tasks"}) -var taskOrder = yamlsaver.NewOrder([]string{"task_key", "depends_on", "existing_cluster_id", "new_cluster", "job_cluster_key"}) +var ( + jobOrder = yamlsaver.NewOrder([]string{"name", "job_clusters", "compute", "tasks"}) + taskOrder = yamlsaver.NewOrder([]string{"task_key", "depends_on", "existing_cluster_id", "new_cluster", "job_cluster_key"}) +) func ConvertJobToValue(job *jobs.Job) (dyn.Value, error) { value := make(map[string]dyn.Value) diff --git a/bundle/config/loader/process_root_includes.go b/bundle/config/loader/process_root_includes.go index c14fb7ce1..c608a3de6 100644 --- a/bundle/config/loader/process_root_includes.go +++ b/bundle/config/loader/process_root_includes.go @@ -27,7 +27,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag. var out []bundle.Mutator // Map with files we've already seen to avoid loading them twice. - var seen = map[string]bool{} + seen := map[string]bool{} for _, file := range config.FileNames { seen[file] = true diff --git a/bundle/config/mutator/apply_presets_test.go b/bundle/config/mutator/apply_presets_test.go index 91d5b62e5..c26f20383 100644 --- a/bundle/config/mutator/apply_presets_test.go +++ b/bundle/config/mutator/apply_presets_test.go @@ -481,5 +481,4 @@ func TestApplyPresetsSourceLinkedDeployment(t *testing.T) { require.Equal(t, tt.expectedValue, b.Config.Presets.SourceLinkedDeployment) }) } - } diff --git a/bundle/config/mutator/compute_id_compat.go b/bundle/config/mutator/compute_id_compat.go index 3afe02e9e..8f1ff5868 100644 --- a/bundle/config/mutator/compute_id_compat.go +++ b/bundle/config/mutator/compute_id_compat.go @@ -42,7 +42,6 @@ func rewriteComputeIdToClusterId(v dyn.Value, p dyn.Path) (dyn.Value, diag.Diagn var diags diag.Diagnostics computeIdPath := p.Append(dyn.Key("compute_id")) computeId, err := dyn.GetByPath(v, computeIdPath) - // If the "compute_id" key is not set, we don't need to do anything. 
if err != nil { return v, nil diff --git a/bundle/config/mutator/expand_pipeline_glob_paths_test.go b/bundle/config/mutator/expand_pipeline_glob_paths_test.go index 9f70b74ae..7cf3c9f3e 100644 --- a/bundle/config/mutator/expand_pipeline_glob_paths_test.go +++ b/bundle/config/mutator/expand_pipeline_glob_paths_test.go @@ -17,7 +17,7 @@ import ( ) func touchEmptyFile(t *testing.T, path string) { - err := os.MkdirAll(filepath.Dir(path), 0700) + err := os.MkdirAll(filepath.Dir(path), 0o700) require.NoError(t, err) f, err := os.Create(path) require.NoError(t, err) diff --git a/bundle/config/mutator/expand_workspace_root.go b/bundle/config/mutator/expand_workspace_root.go index 3f0547de1..a29d129b0 100644 --- a/bundle/config/mutator/expand_workspace_root.go +++ b/bundle/config/mutator/expand_workspace_root.go @@ -28,7 +28,7 @@ func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) diag. } currentUser := b.Config.Workspace.CurrentUser - if currentUser == nil || currentUser.UserName == "" { + if currentUser == nil || currentUser.User == nil || currentUser.UserName == "" { return diag.Errorf("unable to expand workspace root: current user not set") } diff --git a/bundle/config/mutator/initialize_urls.go b/bundle/config/mutator/initialize_urls.go index 319305912..35ff53d0b 100644 --- a/bundle/config/mutator/initialize_urls.go +++ b/bundle/config/mutator/initialize_urls.go @@ -10,8 +10,7 @@ import ( "github.com/databricks/cli/libs/diag" ) -type initializeURLs struct { -} +type initializeURLs struct{} // InitializeURLs makes sure the URL field of each resource is configured. // NOTE: since this depends on an extra API call, this mutator adds some extra @@ -32,11 +31,14 @@ func (m *initializeURLs) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn } orgId := strconv.FormatInt(workspaceId, 10) host := b.WorkspaceClient().Config.CanonicalHostName() - initializeForWorkspace(b, orgId, host) + err = initializeForWorkspace(b, orgId, host) + if err != nil { + return diag.FromErr(err) + } return nil } -func initializeForWorkspace(b *bundle.Bundle, orgId string, host string) error { +func initializeForWorkspace(b *bundle.Bundle, orgId, host string) error { baseURL, err := url.Parse(host) if err != nil { return err diff --git a/bundle/config/mutator/initialize_urls_test.go b/bundle/config/mutator/initialize_urls_test.go index ec4e790c4..f07a7deb3 100644 --- a/bundle/config/mutator/initialize_urls_test.go +++ b/bundle/config/mutator/initialize_urls_test.go @@ -110,7 +110,8 @@ func TestInitializeURLs(t *testing.T) { "dashboard1": "https://mycompany.databricks.com/dashboardsv3/01ef8d56871e1d50ae30ce7375e42478/published?o=123456", } - initializeForWorkspace(b, "123456", "https://mycompany.databricks.com/") + err := initializeForWorkspace(b, "123456", "https://mycompany.databricks.com/") + require.NoError(t, err) for _, group := range b.Config.Resources.AllResources() { for key, r := range group.Resources { @@ -133,7 +134,8 @@ func TestInitializeURLsWithoutOrgId(t *testing.T) { }, } - initializeForWorkspace(b, "123456", "https://adb-123456.azuredatabricks.net/") + err := initializeForWorkspace(b, "123456", "https://adb-123456.azuredatabricks.net/") + require.NoError(t, err) require.Equal(t, "https://adb-123456.azuredatabricks.net/jobs/1", b.Config.Resources.Jobs["job1"].URL) } diff --git a/bundle/config/mutator/load_git_details.go b/bundle/config/mutator/load_git_details.go index 82255552a..5c263ac03 100644 --- a/bundle/config/mutator/load_git_details.go +++ 
b/bundle/config/mutator/load_git_details.go @@ -2,6 +2,8 @@ package mutator import ( "context" + "errors" + "os" "path/filepath" "github.com/databricks/cli/bundle" @@ -24,7 +26,9 @@ func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn var diags diag.Diagnostics info, err := git.FetchRepositoryInfo(ctx, b.BundleRoot.Native(), b.WorkspaceClient()) if err != nil { - diags = append(diags, diag.WarningFromErr(err)...) + if !errors.Is(err, os.ErrNotExist) { + diags = append(diags, diag.WarningFromErr(err)...) + } } if info.WorktreeRoot == "" { diff --git a/bundle/config/mutator/override_compute.go b/bundle/config/mutator/override_compute.go index 5700cdf26..343303402 100644 --- a/bundle/config/mutator/override_compute.go +++ b/bundle/config/mutator/override_compute.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/env" ) @@ -22,7 +23,7 @@ func (m *overrideCompute) Name() string { func overrideJobCompute(j *resources.Job, compute string) { for i := range j.Tasks { - var task = &j.Tasks[i] + task := &j.Tasks[i] if task.ForEachTask != nil { task = &task.ForEachTask.Task @@ -38,18 +39,32 @@ func overrideJobCompute(j *resources.Job, compute string) { } func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - if b.Config.Bundle.Mode != config.Development { + var diags diag.Diagnostics + + if b.Config.Bundle.Mode == config.Production { if b.Config.Bundle.ClusterId != "" { - return diag.Errorf("cannot override compute for an target that does not use 'mode: development'") + // Overriding compute via a command-line flag for production works, but is not recommended. + diags = diags.Extend(diag.Diagnostics{{ + Summary: "Setting a cluster override for a target that uses 'mode: production' is not recommended", + Detail: "It is recommended to always use the same compute for production target for consistency.", + Severity: diag.Warning, + }}) } - return nil } if v := env.Get(ctx, "DATABRICKS_CLUSTER_ID"); v != "" { + // For historical reasons, we allow setting the cluster ID via the DATABRICKS_CLUSTER_ID + // when development mode is used. Sometimes, this is done by accident, so we log an info message. + if b.Config.Bundle.Mode == config.Development { + cmdio.LogString(ctx, "Setting a cluster override because DATABRICKS_CLUSTER_ID is set. It is recommended to use --cluster-id instead, which works in any target mode.") + } else { + // We don't allow using DATABRICKS_CLUSTER_ID in any other mode, it's too error-prone. 
+ return diag.Warningf("The DATABRICKS_CLUSTER_ID variable is set but is ignored since the current target does not use 'mode: development'") + } b.Config.Bundle.ClusterId = v } if b.Config.Bundle.ClusterId == "" { - return nil + return diags } r := b.Config.Resources @@ -57,5 +72,5 @@ func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) diag.Diag overrideJobCompute(r.Jobs[i], b.Config.Bundle.ClusterId) } - return nil + return diags } diff --git a/bundle/config/mutator/override_compute_test.go b/bundle/config/mutator/override_compute_test.go index 369447d7e..1fdeb373c 100644 --- a/bundle/config/mutator/override_compute_test.go +++ b/bundle/config/mutator/override_compute_test.go @@ -8,13 +8,14 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/diag" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestOverrideDevelopment(t *testing.T) { +func TestOverrideComputeModeDevelopment(t *testing.T) { t.Setenv("DATABRICKS_CLUSTER_ID", "") b := &bundle.Bundle{ Config: config.Root{ @@ -62,10 +63,13 @@ func TestOverrideDevelopment(t *testing.T) { assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[3].JobClusterKey) } -func TestOverrideDevelopmentEnv(t *testing.T) { +func TestOverrideComputeModeDefaultIgnoresVariable(t *testing.T) { t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId") b := &bundle.Bundle{ Config: config.Root{ + Bundle: config.Bundle{ + Mode: "", + }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job1": {JobSettings: &jobs.JobSettings{ @@ -86,11 +90,12 @@ func TestOverrideDevelopmentEnv(t *testing.T) { m := mutator.OverrideCompute() diags := bundle.Apply(context.Background(), b, m) - require.NoError(t, diags.Error()) + require.Len(t, diags, 1) + assert.Equal(t, "The DATABRICKS_CLUSTER_ID variable is set but is ignored since the current target does not use 'mode: development'", diags[0].Summary) assert.Equal(t, "cluster2", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId) } -func TestOverridePipelineTask(t *testing.T) { +func TestOverrideComputePipelineTask(t *testing.T) { t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId") b := &bundle.Bundle{ Config: config.Root{ @@ -115,7 +120,7 @@ func TestOverridePipelineTask(t *testing.T) { assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId) } -func TestOverrideForEachTask(t *testing.T) { +func TestOverrideComputeForEachTask(t *testing.T) { t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId") b := &bundle.Bundle{ Config: config.Root{ @@ -140,10 +145,11 @@ func TestOverrideForEachTask(t *testing.T) { assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ForEachTask.Task) } -func TestOverrideProduction(t *testing.T) { +func TestOverrideComputeModeProduction(t *testing.T) { b := &bundle.Bundle{ Config: config.Root{ Bundle: config.Bundle{ + Mode: config.Production, ClusterId: "newClusterID", }, Resources: config.Resources{ @@ -166,13 +172,19 @@ func TestOverrideProduction(t *testing.T) { m := mutator.OverrideCompute() diags := bundle.Apply(context.Background(), b, m) - require.True(t, diags.HasError()) + require.Len(t, diags, 1) + assert.Equal(t, "Setting a cluster override for a target that uses 'mode: production' is not recommended", diags[0].Summary) + assert.Equal(t, diag.Warning, diags[0].Severity) + 
assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId) } -func TestOverrideProductionEnv(t *testing.T) { +func TestOverrideComputeModeProductionIgnoresVariable(t *testing.T) { t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId") b := &bundle.Bundle{ Config: config.Root{ + Bundle: config.Bundle{ + Mode: config.Production, + }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job1": {JobSettings: &jobs.JobSettings{ @@ -193,5 +205,7 @@ func TestOverrideProductionEnv(t *testing.T) { m := mutator.OverrideCompute() diags := bundle.Apply(context.Background(), b, m) - require.NoError(t, diags.Error()) + require.Len(t, diags, 1) + assert.Equal(t, "The DATABRICKS_CLUSTER_ID variable is set but is ignored since the current target does not use 'mode: development'", diags[0].Summary) + assert.Equal(t, "cluster2", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId) } diff --git a/bundle/config/mutator/paths/job_paths_visitor.go b/bundle/config/mutator/paths/job_paths_visitor.go index 275a8fa53..1d713aaf5 100644 --- a/bundle/config/mutator/paths/job_paths_visitor.go +++ b/bundle/config/mutator/paths/job_paths_visitor.go @@ -95,7 +95,7 @@ func jobRewritePatterns() []jobRewritePattern { // VisitJobPaths visits all paths in job resources and applies a function to each path. func VisitJobPaths(value dyn.Value, fn VisitFunc) (dyn.Value, error) { var err error - var newValue = value + newValue := value for _, rewritePattern := range jobRewritePatterns() { newValue, err = dyn.MapByPattern(newValue, rewritePattern.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { @@ -105,7 +105,6 @@ func VisitJobPaths(value dyn.Value, fn VisitFunc) (dyn.Value, error) { return fn(p, rewritePattern.kind, v) }) - if err != nil { return dyn.InvalidValue, err } diff --git a/bundle/config/mutator/prepend_workspace_prefix.go b/bundle/config/mutator/prepend_workspace_prefix.go index e0be2572d..b093ec26a 100644 --- a/bundle/config/mutator/prepend_workspace_prefix.go +++ b/bundle/config/mutator/prepend_workspace_prefix.go @@ -57,14 +57,12 @@ func (m *prependWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) di return dyn.NewValue(fmt.Sprintf("/Workspace%s", path), v.Locations()), nil }) - if err != nil { return dyn.InvalidValue, err } } return v, nil }) - if err != nil { return diag.FromErr(err) } diff --git a/bundle/config/mutator/python/python_diagnostics_test.go b/bundle/config/mutator/python/python_diagnostics_test.go index b73b0f73c..fd6def8da 100644 --- a/bundle/config/mutator/python/python_diagnostics_test.go +++ b/bundle/config/mutator/python/python_diagnostics_test.go @@ -30,7 +30,6 @@ type parsePythonDiagnosticsTest struct { } func TestParsePythonDiagnostics(t *testing.T) { - testCases := []parsePythonDiagnosticsTest{ { name: "short error with location", diff --git a/bundle/config/mutator/python/python_mutator.go b/bundle/config/mutator/python/python_mutator.go index da6c4d210..69c1a5dd6 100644 --- a/bundle/config/mutator/python/python_mutator.go +++ b/bundle/config/mutator/python/python_mutator.go @@ -9,12 +9,11 @@ import ( "io" "os" "path/filepath" + "strings" "github.com/databricks/databricks-sdk-go/logger" "github.com/fatih/color" - "strings" - "github.com/databricks/cli/libs/python" "github.com/databricks/cli/bundle/env" @@ -94,11 +93,10 @@ func (m *pythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno // mutateDiags is used because Mutate returns 'error' instead of 'diag.Diagnostics' var mutateDiags diag.Diagnostics - var 
mutateDiagsHasError = errors.New("unexpected error") + mutateDiagsHasError := errors.New("unexpected error") err := b.Config.Mutate(func(leftRoot dyn.Value) (dyn.Value, error) { pythonPath, err := detectExecutable(ctx, experimental.PyDABs.VEnvPath) - if err != nil { return dyn.InvalidValue, fmt.Errorf("failed to get Python interpreter path: %w", err) } @@ -141,7 +139,7 @@ func createCacheDir(ctx context.Context) (string, error) { // use 'default' as target name cacheDir := filepath.Join(tempDir, "default", "pydabs") - err := os.MkdirAll(cacheDir, 0700) + err := os.MkdirAll(cacheDir, 0o700) if err != nil { return "", err } @@ -152,7 +150,7 @@ func createCacheDir(ctx context.Context) (string, error) { return os.MkdirTemp("", "-pydabs") } -func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, rootPath string, pythonPath string, root dyn.Value) (dyn.Value, diag.Diagnostics) { +func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir, rootPath, pythonPath string, root dyn.Value) (dyn.Value, diag.Diagnostics) { inputPath := filepath.Join(cacheDir, "input.json") outputPath := filepath.Join(cacheDir, "output.json") diagnosticsPath := filepath.Join(cacheDir, "diagnostics.json") @@ -263,10 +261,10 @@ func writeInputFile(inputPath string, input dyn.Value) error { return fmt.Errorf("failed to marshal input: %w", err) } - return os.WriteFile(inputPath, rootConfigJson, 0600) + return os.WriteFile(inputPath, rootConfigJson, 0o600) } -func loadOutputFile(rootPath string, outputPath string) (dyn.Value, diag.Diagnostics) { +func loadOutputFile(rootPath, outputPath string) (dyn.Value, diag.Diagnostics) { outputFile, err := os.Open(outputPath) if err != nil { return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to open output file: %w", err)) @@ -381,7 +379,7 @@ func createLoadOverrideVisitor(ctx context.Context) merge.OverrideVisitor { return right, nil }, - VisitUpdate: func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) { + VisitUpdate: func(valuePath dyn.Path, left, right dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (update)", valuePath.String()) }, } @@ -430,7 +428,7 @@ func createInitOverrideVisitor(ctx context.Context) merge.OverrideVisitor { return right, nil }, - VisitUpdate: func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) { + VisitUpdate: func(valuePath dyn.Path, left, right dyn.Value) (dyn.Value, error) { if !valuePath.HasPrefix(jobsPath) { return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (update)", valuePath.String()) } diff --git a/bundle/config/mutator/python/python_mutator_test.go b/bundle/config/mutator/python/python_mutator_test.go index 7a419d799..8bdf91d03 100644 --- a/bundle/config/mutator/python/python_mutator_test.go +++ b/bundle/config/mutator/python/python_mutator_test.go @@ -106,7 +106,6 @@ func TestPythonMutator_load(t *testing.T) { Column: 5, }, }, diags[0].Locations) - } func TestPythonMutator_load_disallowed(t *testing.T) { @@ -542,7 +541,7 @@ func TestLoadDiagnosticsFile_nonExistent(t *testing.T) { func TestInterpreterPath(t *testing.T) { if runtime.GOOS == "windows" { - assert.Equal(t, "venv\\Scripts\\python3.exe", interpreterPath("venv")) + assert.Equal(t, "venv\\Scripts\\python.exe", interpreterPath("venv")) } else { assert.Equal(t, "venv/bin/python3", interpreterPath("venv")) } @@ -588,7 +587,7 @@ or activate the environment before running CLI commands: assert.Equal(t, expected, out) } -func withProcessStub(t *testing.T, 
args []string, output string, diagnostics string) context.Context { +func withProcessStub(t *testing.T, args []string, output, diagnostics string) context.Context { ctx := context.Background() ctx, stub := process.WithStub(ctx) @@ -611,10 +610,10 @@ func withProcessStub(t *testing.T, args []string, output string, diagnostics str assert.NoError(t, err) if reflect.DeepEqual(actual.Args, args) { - err := os.WriteFile(outputPath, []byte(output), 0600) + err := os.WriteFile(outputPath, []byte(output), 0o600) require.NoError(t, err) - err = os.WriteFile(diagnosticsPath, []byte(diagnostics), 0600) + err = os.WriteFile(diagnosticsPath, []byte(diagnostics), 0o600) require.NoError(t, err) return nil @@ -626,7 +625,7 @@ func withProcessStub(t *testing.T, args []string, output string, diagnostics str return ctx } -func loadYaml(name string, content string) *bundle.Bundle { +func loadYaml(name, content string) *bundle.Bundle { v, diag := config.LoadFromBytes(name, []byte(content)) if diag.Error() != nil { @@ -650,17 +649,17 @@ func withFakeVEnv(t *testing.T, venvPath string) { interpreterPath := interpreterPath(venvPath) - err = os.MkdirAll(filepath.Dir(interpreterPath), 0755) + err = os.MkdirAll(filepath.Dir(interpreterPath), 0o755) if err != nil { panic(err) } - err = os.WriteFile(interpreterPath, []byte(""), 0755) + err = os.WriteFile(interpreterPath, []byte(""), 0o755) if err != nil { panic(err) } - err = os.WriteFile(filepath.Join(venvPath, "pyvenv.cfg"), []byte(""), 0755) + err = os.WriteFile(filepath.Join(venvPath, "pyvenv.cfg"), []byte(""), 0o755) if err != nil { panic(err) } @@ -674,7 +673,7 @@ func withFakeVEnv(t *testing.T, venvPath string) { func interpreterPath(venvPath string) string { if runtime.GOOS == "windows" { - return filepath.Join(venvPath, "Scripts", "python3.exe") + return filepath.Join(venvPath, "Scripts", "python.exe") } else { return filepath.Join(venvPath, "bin", "python3") } diff --git a/bundle/config/mutator/resolve_resource_references.go b/bundle/config/mutator/resolve_resource_references.go index 89eaa346c..bf902f928 100644 --- a/bundle/config/mutator/resolve_resource_references.go +++ b/bundle/config/mutator/resolve_resource_references.go @@ -36,8 +36,7 @@ func (m *resolveResourceReferences) Apply(ctx context.Context, b *bundle.Bundle) return fmt.Errorf("failed to resolve %s, err: %w", v.Lookup, err) } - v.Set(id) - return nil + return v.Set(id) }) } diff --git a/bundle/config/mutator/resolve_resource_references_test.go b/bundle/config/mutator/resolve_resource_references_test.go index ee2f0e2ea..624e337c7 100644 --- a/bundle/config/mutator/resolve_resource_references_test.go +++ b/bundle/config/mutator/resolve_resource_references_test.go @@ -108,7 +108,8 @@ func TestNoLookupIfVariableIsSet(t *testing.T) { m := mocks.NewMockWorkspaceClient(t) b.SetWorkpaceClient(m.WorkspaceClient) - b.Config.Variables["my-cluster-id"].Set("random value") + err := b.Config.Variables["my-cluster-id"].Set("random value") + require.NoError(t, err) diags := bundle.Apply(context.Background(), b, ResolveResourceReferences()) require.NoError(t, diags.Error()) diff --git a/bundle/config/mutator/resolve_variable_references.go b/bundle/config/mutator/resolve_variable_references.go index 5e5b76109..8c207e375 100644 --- a/bundle/config/mutator/resolve_variable_references.go +++ b/bundle/config/mutator/resolve_variable_references.go @@ -32,11 +32,12 @@ func ResolveVariableReferencesInLookup() bundle.Mutator { } func ResolveVariableReferencesInComplexVariables() bundle.Mutator { - return 
&resolveVariableReferences{prefixes: []string{ - "bundle", - "workspace", - "variables", - }, + return &resolveVariableReferences{ + prefixes: []string{ + "bundle", + "workspace", + "variables", + }, pattern: dyn.NewPattern(dyn.Key("variables"), dyn.AnyKey(), dyn.Key("value")), lookupFn: lookupForComplexVariables, skipFn: skipResolvingInNonComplexVariables, @@ -173,7 +174,6 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) return dyn.InvalidValue, dynvar.ErrSkipResolution }) }) - if err != nil { return dyn.InvalidValue, err } @@ -184,7 +184,6 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) diags = diags.Extend(normaliseDiags) return root, nil }) - if err != nil { diags = diags.Extend(diag.FromErr(err)) } diff --git a/bundle/config/mutator/rewrite_workspace_prefix.go b/bundle/config/mutator/rewrite_workspace_prefix.go index 8a39ee8a1..0ccb3314b 100644 --- a/bundle/config/mutator/rewrite_workspace_prefix.go +++ b/bundle/config/mutator/rewrite_workspace_prefix.go @@ -63,7 +63,6 @@ func (m *rewriteWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) di return v, nil }) }) - if err != nil { return diag.FromErr(err) } diff --git a/bundle/config/mutator/rewrite_workspace_prefix_test.go b/bundle/config/mutator/rewrite_workspace_prefix_test.go index d75ec89db..48973a4cf 100644 --- a/bundle/config/mutator/rewrite_workspace_prefix_test.go +++ b/bundle/config/mutator/rewrite_workspace_prefix_test.go @@ -81,5 +81,4 @@ func TestNoWorkspacePrefixUsed(t *testing.T) { require.Equal(t, "${workspace.artifact_path}/jar1.jar", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[1].Libraries[0].Jar) require.Equal(t, "${workspace.file_path}/notebook2", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[2].NotebookTask.NotebookPath) require.Equal(t, "${workspace.artifact_path}/jar2.jar", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[2].Libraries[0].Jar) - } diff --git a/bundle/config/mutator/run_as.go b/bundle/config/mutator/run_as.go index 0ca71e28e..7ffd782c2 100644 --- a/bundle/config/mutator/run_as.go +++ b/bundle/config/mutator/run_as.go @@ -12,8 +12,7 @@ import ( "github.com/databricks/databricks-sdk-go/service/jobs" ) -type setRunAs struct { -} +type setRunAs struct{} // This mutator does two things: // @@ -30,7 +29,7 @@ func (m *setRunAs) Name() string { return "SetRunAs" } -func reportRunAsNotSupported(resourceType string, location dyn.Location, currentUser string, runAsUser string) diag.Diagnostics { +func reportRunAsNotSupported(resourceType string, location dyn.Location, currentUser, runAsUser string) diag.Diagnostics { return diag.Diagnostics{{ Summary: fmt.Sprintf("%s do not support a setting a run_as user that is different from the owner.\n"+ "Current identity: %s. Run as identity: %s.\n"+ diff --git a/bundle/config/mutator/set_variables.go b/bundle/config/mutator/set_variables.go index 47ce2ad03..9e9f2dcfe 100644 --- a/bundle/config/mutator/set_variables.go +++ b/bundle/config/mutator/set_variables.go @@ -65,7 +65,6 @@ func setVariable(ctx context.Context, v dyn.Value, variable *variable.Variable, // We should have had a value to set for the variable at this point. return dyn.InvalidValue, fmt.Errorf(`no value assigned to required variable %s. 
Assignment can be done through the "--var" flag or by setting the %s environment variable`, name, bundleVarPrefix+name) - } func (m *setVariables) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { diff --git a/bundle/config/mutator/sync_infer_root.go b/bundle/config/mutator/sync_infer_root.go index 512adcdbf..160fcc908 100644 --- a/bundle/config/mutator/sync_infer_root.go +++ b/bundle/config/mutator/sync_infer_root.go @@ -35,7 +35,7 @@ func (m *syncInferRoot) Name() string { // If the path does not exist, it returns an empty string. // // See "sync_infer_root_internal_test.go" for examples. -func (m *syncInferRoot) computeRoot(path string, root string) string { +func (m *syncInferRoot) computeRoot(path, root string) string { for !filepath.IsLocal(path) { // Break if we have reached the root of the filesystem. dir := filepath.Dir(root) diff --git a/bundle/config/mutator/translate_paths.go b/bundle/config/mutator/translate_paths.go index 5e016d8a1..af0f94120 100644 --- a/bundle/config/mutator/translate_paths.go +++ b/bundle/config/mutator/translate_paths.go @@ -275,8 +275,8 @@ func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnos } func gatherFallbackPaths(v dyn.Value, typ string) (map[string]string, error) { - var fallback = make(map[string]string) - var pattern = dyn.NewPattern(dyn.Key("resources"), dyn.Key(typ), dyn.AnyKey()) + fallback := make(map[string]string) + pattern := dyn.NewPattern(dyn.Key("resources"), dyn.Key(typ), dyn.AnyKey()) // Previous behavior was to use a resource's location as the base path to resolve // relative paths in its definition. With the introduction of [dyn.Value] throughout, diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index bf6ba15d8..493abb8c5 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -28,12 +28,13 @@ import ( func touchNotebookFile(t *testing.T, path string) { f, err := os.Create(path) require.NoError(t, err) - f.WriteString("# Databricks notebook source\n") + _, err = f.WriteString("# Databricks notebook source\n") + require.NoError(t, err) f.Close() } func touchEmptyFile(t *testing.T, path string) { - err := os.MkdirAll(filepath.Dir(path), 0700) + err := os.MkdirAll(filepath.Dir(path), 0o700) require.NoError(t, err) f, err := os.Create(path) require.NoError(t, err) diff --git a/bundle/config/mutator/verify_cli_version.go b/bundle/config/mutator/verify_cli_version.go index 279af44e6..873e4f780 100644 --- a/bundle/config/mutator/verify_cli_version.go +++ b/bundle/config/mutator/verify_cli_version.go @@ -15,8 +15,7 @@ func VerifyCliVersion() bundle.Mutator { return &verifyCliVersion{} } -type verifyCliVersion struct { -} +type verifyCliVersion struct{} func (v *verifyCliVersion) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // No constraints specified, skip the check. diff --git a/bundle/config/presets.go b/bundle/config/presets.go index 30f56c0f8..252c5b5f7 100644 --- a/bundle/config/presets.go +++ b/bundle/config/presets.go @@ -1,7 +1,9 @@ package config -const Paused = "PAUSED" -const Unpaused = "UNPAUSED" +const ( + Paused = "PAUSED" + Unpaused = "UNPAUSED" +) type Presets struct { // NamePrefix to prepend to all resource names. 
diff --git a/bundle/config/resources_test.go b/bundle/config/resources_test.go index 9ae73b22a..2d05acf3e 100644 --- a/bundle/config/resources_test.go +++ b/bundle/config/resources_test.go @@ -49,7 +49,8 @@ func TestCustomMarshallerIsImplemented(t *testing.T) { // Eg: resource.Job implements MarshalJSON v := reflect.Zero(vt.Elem()).Interface() assert.NotPanics(t, func() { - json.Marshal(v) + _, err := json.Marshal(v) + assert.NoError(t, err) }, "Resource %s does not have a custom marshaller", field.Name) // Unmarshalling a *resourceStruct will panic if the resource does not have a custom unmarshaller @@ -58,7 +59,8 @@ func TestCustomMarshallerIsImplemented(t *testing.T) { // Eg: *resource.Job implements UnmarshalJSON v = reflect.New(vt.Elem()).Interface() assert.NotPanics(t, func() { - json.Unmarshal([]byte("{}"), v) + err := json.Unmarshal([]byte("{}"), v) + assert.NoError(t, err) }, "Resource %s does not have a custom unmarshaller", field.Name) } } diff --git a/bundle/config/root_test.go b/bundle/config/root_test.go index a77f961bd..42fae49d9 100644 --- a/bundle/config/root_test.go +++ b/bundle/config/root_test.go @@ -100,7 +100,7 @@ func TestRootMergeTargetOverridesWithMode(t *testing.T) { }, }, } - root.initializeDynamicValue() + require.NoError(t, root.initializeDynamicValue()) require.NoError(t, root.MergeTargetOverrides("development")) assert.Equal(t, Development, root.Bundle.Mode) } @@ -156,7 +156,7 @@ func TestRootMergeTargetOverridesWithVariables(t *testing.T) { }, }, } - root.initializeDynamicValue() + require.NoError(t, root.initializeDynamicValue()) require.NoError(t, root.MergeTargetOverrides("development")) assert.Equal(t, "bar", root.Variables["foo"].Default) assert.Equal(t, "foo var", root.Variables["foo"].Description) @@ -168,7 +168,6 @@ func TestRootMergeTargetOverridesWithVariables(t *testing.T) { "key1": "value1", }, root.Variables["complex"].Default) assert.Equal(t, "complex var", root.Variables["complex"].Description) - } func TestIsFullVariableOverrideDef(t *testing.T) { @@ -252,5 +251,4 @@ func TestIsFullVariableOverrideDef(t *testing.T) { for i, tc := range testCases { assert.Equal(t, tc.expected, isFullVariableOverrideDef(tc.value), "test case %d", i) } - } diff --git a/bundle/config/validate/files_to_sync.go b/bundle/config/validate/files_to_sync.go index a14278482..b4de06773 100644 --- a/bundle/config/validate/files_to_sync.go +++ b/bundle/config/validate/files_to_sync.go @@ -13,8 +13,7 @@ func FilesToSync() bundle.ReadOnlyMutator { return &filesToSync{} } -type filesToSync struct { -} +type filesToSync struct{} func (v *filesToSync) Name() string { return "validate:files_to_sync" diff --git a/bundle/config/validate/files_to_sync_test.go b/bundle/config/validate/files_to_sync_test.go index 30af9026d..d6a1ed59a 100644 --- a/bundle/config/validate/files_to_sync_test.go +++ b/bundle/config/validate/files_to_sync_test.go @@ -2,6 +2,7 @@ package validate import ( "context" + "path/filepath" "testing" "github.com/databricks/cli/bundle" @@ -81,7 +82,7 @@ func TestFilesToSync_EverythingIgnored(t *testing.T) { b := setupBundleForFilesToSyncTest(t) // Ignore all files. 
- testutil.WriteFile(t, "*\n.*\n", b.BundleRootPath, ".gitignore") + testutil.WriteFile(t, filepath.Join(b.BundleRootPath, ".gitignore"), "*\n.*\n") ctx := context.Background() rb := bundle.ReadOnly(b) diff --git a/bundle/config/validate/folder_permissions.go b/bundle/config/validate/folder_permissions.go index 505e82a1e..aa89a0551 100644 --- a/bundle/config/validate/folder_permissions.go +++ b/bundle/config/validate/folder_permissions.go @@ -15,8 +15,7 @@ import ( "golang.org/x/sync/errgroup" ) -type folderPermissions struct { -} +type folderPermissions struct{} // Apply implements bundle.ReadOnlyMutator. func (f *folderPermissions) Apply(ctx context.Context, b bundle.ReadOnlyBundle) diag.Diagnostics { diff --git a/bundle/config/validate/job_cluster_key_defined.go b/bundle/config/validate/job_cluster_key_defined.go index 368c3edb1..c3a1ab3df 100644 --- a/bundle/config/validate/job_cluster_key_defined.go +++ b/bundle/config/validate/job_cluster_key_defined.go @@ -13,8 +13,7 @@ func JobClusterKeyDefined() bundle.ReadOnlyMutator { return &jobClusterKeyDefined{} } -type jobClusterKeyDefined struct { -} +type jobClusterKeyDefined struct{} func (v *jobClusterKeyDefined) Name() string { return "validate:job_cluster_key_defined" diff --git a/bundle/config/validate/job_task_cluster_spec.go b/bundle/config/validate/job_task_cluster_spec.go index b80befcdf..5f532acfe 100644 --- a/bundle/config/validate/job_task_cluster_spec.go +++ b/bundle/config/validate/job_task_cluster_spec.go @@ -17,8 +17,7 @@ func JobTaskClusterSpec() bundle.ReadOnlyMutator { return &jobTaskClusterSpec{} } -type jobTaskClusterSpec struct { -} +type jobTaskClusterSpec struct{} func (v *jobTaskClusterSpec) Name() string { return "validate:job_task_cluster_spec" diff --git a/bundle/config/validate/single_node_cluster_test.go b/bundle/config/validate/single_node_cluster_test.go index 18771cc00..c3ead8ef6 100644 --- a/bundle/config/validate/single_node_cluster_test.go +++ b/bundle/config/validate/single_node_cluster_test.go @@ -175,7 +175,6 @@ func TestValidateSingleNodeClusterFailForJobClusters(t *testing.T) { Paths: []dyn.Path{dyn.MustPathFromString("resources.jobs.foo.job_clusters[0].new_cluster")}, }, }, diags) - }) } } diff --git a/bundle/config/validate/validate.go b/bundle/config/validate/validate.go index d1420ee80..6b52b920a 100644 --- a/bundle/config/validate/validate.go +++ b/bundle/config/validate/validate.go @@ -8,8 +8,7 @@ import ( "github.com/databricks/cli/libs/dyn" ) -type validate struct { -} +type validate struct{} type location struct { path string diff --git a/bundle/config/validate/validate_sync_patterns.go b/bundle/config/validate/validate_sync_patterns.go index 52f06835c..f5787a81d 100644 --- a/bundle/config/validate/validate_sync_patterns.go +++ b/bundle/config/validate/validate_sync_patterns.go @@ -17,8 +17,7 @@ func ValidateSyncPatterns() bundle.ReadOnlyMutator { return &validateSyncPatterns{} } -type validateSyncPatterns struct { -} +type validateSyncPatterns struct{} func (v *validateSyncPatterns) Name() string { return "validate:validate_sync_patterns" diff --git a/bundle/config/variable/lookup_test.go b/bundle/config/variable/lookup_test.go index a84748751..bd54d89fc 100644 --- a/bundle/config/variable/lookup_test.go +++ b/bundle/config/variable/lookup_test.go @@ -42,7 +42,6 @@ func TestLookup_Empty(t *testing.T) { // No string representation for an invalid lookup assert.Empty(t, lookup.String()) - } func TestLookup_Multiple(t *testing.T) { diff --git a/bundle/config/variable/resolve_cluster.go 
b/bundle/config/variable/resolve_cluster.go index 2d68b7fb7..a8cf3fe7f 100644 --- a/bundle/config/variable/resolve_cluster.go +++ b/bundle/config/variable/resolve_cluster.go @@ -20,7 +20,6 @@ func (l resolveCluster) Resolve(ctx context.Context, w *databricks.WorkspaceClie ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi}, }, }) - if err != nil { return "", err } diff --git a/bundle/config/workspace_test.go b/bundle/config/workspace_test.go index 3ef963253..384cc0a2c 100644 --- a/bundle/config/workspace_test.go +++ b/bundle/config/workspace_test.go @@ -11,6 +11,7 @@ import ( "github.com/databricks/cli/libs/databrickscfg" "github.com/databricks/databricks-sdk-go/config" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func setupWorkspaceTest(t *testing.T) string { @@ -42,11 +43,12 @@ func TestWorkspaceResolveProfileFromHost(t *testing.T) { setupWorkspaceTest(t) // This works if there is a config file with a matching profile. - databrickscfg.SaveToProfile(context.Background(), &config.Config{ + err := databrickscfg.SaveToProfile(context.Background(), &config.Config{ Profile: "default", Host: "https://abc.cloud.databricks.com", Token: "123", }) + require.NoError(t, err) client, err := w.Client() assert.NoError(t, err) @@ -57,12 +59,13 @@ func TestWorkspaceResolveProfileFromHost(t *testing.T) { home := setupWorkspaceTest(t) // This works if there is a config file with a matching profile. - databrickscfg.SaveToProfile(context.Background(), &config.Config{ + err := databrickscfg.SaveToProfile(context.Background(), &config.Config{ ConfigFile: filepath.Join(home, "customcfg"), Profile: "custom", Host: "https://abc.cloud.databricks.com", Token: "123", }) + require.NoError(t, err) t.Setenv("DATABRICKS_CONFIG_FILE", filepath.Join(home, "customcfg")) client, err := w.Client() @@ -90,12 +93,13 @@ func TestWorkspaceVerifyProfileForHost(t *testing.T) { setupWorkspaceTest(t) // This works if there is a config file with a matching profile. - databrickscfg.SaveToProfile(context.Background(), &config.Config{ + err := databrickscfg.SaveToProfile(context.Background(), &config.Config{ Profile: "abc", Host: "https://abc.cloud.databricks.com", }) + require.NoError(t, err) - _, err := w.Client() + _, err = w.Client() assert.NoError(t, err) }) @@ -103,12 +107,13 @@ func TestWorkspaceVerifyProfileForHost(t *testing.T) { setupWorkspaceTest(t) // This works if there is a config file with a matching profile. - databrickscfg.SaveToProfile(context.Background(), &config.Config{ + err := databrickscfg.SaveToProfile(context.Background(), &config.Config{ Profile: "abc", Host: "https://def.cloud.databricks.com", }) + require.NoError(t, err) - _, err := w.Client() + _, err = w.Client() assert.ErrorContains(t, err, "config host mismatch") }) @@ -116,14 +121,15 @@ func TestWorkspaceVerifyProfileForHost(t *testing.T) { home := setupWorkspaceTest(t) // This works if there is a config file with a matching profile. 
- databrickscfg.SaveToProfile(context.Background(), &config.Config{ + err := databrickscfg.SaveToProfile(context.Background(), &config.Config{ ConfigFile: filepath.Join(home, "customcfg"), Profile: "abc", Host: "https://abc.cloud.databricks.com", }) + require.NoError(t, err) t.Setenv("DATABRICKS_CONFIG_FILE", filepath.Join(home, "customcfg")) - _, err := w.Client() + _, err = w.Client() assert.NoError(t, err) }) @@ -131,14 +137,15 @@ func TestWorkspaceVerifyProfileForHost(t *testing.T) { home := setupWorkspaceTest(t) // This works if there is a config file with a matching profile. - databrickscfg.SaveToProfile(context.Background(), &config.Config{ + err := databrickscfg.SaveToProfile(context.Background(), &config.Config{ ConfigFile: filepath.Join(home, "customcfg"), Profile: "abc", Host: "https://def.cloud.databricks.com", }) + require.NoError(t, err) t.Setenv("DATABRICKS_CONFIG_FILE", filepath.Join(home, "customcfg")) - _, err := w.Client() + _, err = w.Client() assert.ErrorContains(t, err, "config host mismatch") }) } diff --git a/bundle/deferred.go b/bundle/deferred.go index 56c2bdca2..e7e0c2aeb 100644 --- a/bundle/deferred.go +++ b/bundle/deferred.go @@ -15,7 +15,7 @@ func (d *DeferredMutator) Name() string { return "deferred" } -func Defer(mutator Mutator, finally Mutator) Mutator { +func Defer(mutator, finally Mutator) Mutator { return &DeferredMutator{ mutator: mutator, finally: finally, diff --git a/bundle/deferred_test.go b/bundle/deferred_test.go index 3abc4aa10..ea3df17c4 100644 --- a/bundle/deferred_test.go +++ b/bundle/deferred_test.go @@ -19,7 +19,7 @@ func (t *mutatorWithError) Name() string { func (t *mutatorWithError) Apply(_ context.Context, b *Bundle) diag.Diagnostics { t.applyCalled++ - return diag.Errorf(t.errorMsg) + return diag.Errorf(t.errorMsg) // nolint:govet } func TestDeferredMutatorWhenAllMutatorsSucceed(t *testing.T) { diff --git a/bundle/deploy/state.go b/bundle/deploy/state.go index 4f2bc4ee4..a131ab9c3 100644 --- a/bundle/deploy/state.go +++ b/bundle/deploy/state.go @@ -15,8 +15,10 @@ import ( "github.com/google/uuid" ) -const DeploymentStateFileName = "deployment.json" -const DeploymentStateVersion = 1 +const ( + DeploymentStateFileName = "deployment.json" + DeploymentStateVersion = 1 +) type File struct { LocalPath string `json:"local_path"` @@ -132,7 +134,7 @@ func (f Filelist) ToSlice(root vfs.Path) []fileset.File { return files } -func isLocalStateStale(local io.Reader, remote io.Reader) bool { +func isLocalStateStale(local, remote io.Reader) bool { localState, err := loadState(local) if err != nil { return true diff --git a/bundle/deploy/state_pull.go b/bundle/deploy/state_pull.go index 5e301a6f3..8fffca073 100644 --- a/bundle/deploy/state_pull.go +++ b/bundle/deploy/state_pull.go @@ -44,7 +44,7 @@ func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic return diag.FromErr(err) } - local, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR, 0600) + local, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR, 0o600) if err != nil { return diag.FromErr(err) } @@ -62,8 +62,14 @@ func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic } // Truncating the file before writing - local.Truncate(0) - local.Seek(0, 0) + err = local.Truncate(0) + if err != nil { + return diag.FromErr(err) + } + _, err = local.Seek(0, 0) + if err != nil { + return diag.FromErr(err) + } // Write file to disk. 
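An aside on a change that recurs throughout this patch (here in deploy/state_pull.go, and again in the state tests, state_update.go, init_test.go, and the terraform state_pull.go further down): legacy octal literals such as 0600, 0644 and 0777 are rewritten in the explicit 0o form that gofumpt enforces. The value is unchanged, only the spelling; a throwaway snippet to confirm:

package main

import "fmt"

func main() {
	// The legacy and explicit octal spellings denote the same numbers;
	// this patch only changes the surface syntax of the literals.
	fmt.Println(0600 == 0o600, 0644 == 0o644, 0777 == 0o777) // true true true
}
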
log.Infof(ctx, "Writing remote deployment state file to local cache directory") diff --git a/bundle/deploy/state_pull_test.go b/bundle/deploy/state_pull_test.go index 42701eb26..36c49fb01 100644 --- a/bundle/deploy/state_pull_test.go +++ b/bundle/deploy/state_pull_test.go @@ -99,7 +99,7 @@ func testStatePull(t *testing.T, opts statePullOpts) { snapshotPath, err := sync.SnapshotPath(opts) require.NoError(t, err) - err = os.WriteFile(snapshotPath, []byte("snapshot"), 0644) + err = os.WriteFile(snapshotPath, []byte("snapshot"), 0o644) require.NoError(t, err) } @@ -110,7 +110,7 @@ func testStatePull(t *testing.T, opts statePullOpts) { data, err := json.Marshal(opts.localState) require.NoError(t, err) - err = os.WriteFile(statePath, data, 0644) + err = os.WriteFile(statePath, data, 0o644) require.NoError(t, err) } diff --git a/bundle/deploy/state_push_test.go b/bundle/deploy/state_push_test.go index 038b75341..3562ec147 100644 --- a/bundle/deploy/state_push_test.go +++ b/bundle/deploy/state_push_test.go @@ -74,7 +74,7 @@ func TestStatePush(t *testing.T) { data, err := json.Marshal(state) require.NoError(t, err) - err = os.WriteFile(statePath, data, 0644) + err = os.WriteFile(statePath, data, 0o644) require.NoError(t, err) diags := bundle.Apply(ctx, b, s) diff --git a/bundle/deploy/state_update.go b/bundle/deploy/state_update.go index 9ab1bacf1..5488d50ed 100644 --- a/bundle/deploy/state_update.go +++ b/bundle/deploy/state_update.go @@ -17,8 +17,7 @@ import ( "github.com/google/uuid" ) -type stateUpdate struct { -} +type stateUpdate struct{} func (s *stateUpdate) Name() string { return "deploy:state-update" @@ -57,7 +56,7 @@ func (s *stateUpdate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnost return diag.FromErr(err) } // Write the state back to the file. 
- f, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0600) + f, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o600) if err != nil { log.Infof(ctx, "Unable to open deployment state file: %s", err) return diag.FromErr(err) diff --git a/bundle/deploy/state_update_test.go b/bundle/deploy/state_update_test.go index 1f5010b52..e561f534e 100644 --- a/bundle/deploy/state_update_test.go +++ b/bundle/deploy/state_update_test.go @@ -119,7 +119,7 @@ func TestStateUpdateWithExistingState(t *testing.T) { data, err := json.Marshal(state) require.NoError(t, err) - err = os.WriteFile(statePath, data, 0644) + err = os.WriteFile(statePath, data, 0o644) require.NoError(t, err) diags := bundle.Apply(ctx, b, s) diff --git a/bundle/deploy/terraform/check_dashboards_modified_remotely.go b/bundle/deploy/terraform/check_dashboards_modified_remotely.go index c884bcb9b..f263e8a7f 100644 --- a/bundle/deploy/terraform/check_dashboards_modified_remotely.go +++ b/bundle/deploy/terraform/check_dashboards_modified_remotely.go @@ -42,8 +42,7 @@ func collectDashboardsFromState(ctx context.Context, b *bundle.Bundle) ([]dashbo return dashboards, nil } -type checkDashboardsModifiedRemotely struct { -} +type checkDashboardsModifiedRemotely struct{} func (l *checkDashboardsModifiedRemotely) Name() string { return "CheckDashboardsModifiedRemotely" diff --git a/bundle/deploy/terraform/check_dashboards_modified_remotely_test.go b/bundle/deploy/terraform/check_dashboards_modified_remotely_test.go index 25aee125f..1bed3b1be 100644 --- a/bundle/deploy/terraform/check_dashboards_modified_remotely_test.go +++ b/bundle/deploy/terraform/check_dashboards_modified_remotely_test.go @@ -139,7 +139,7 @@ func writeFakeDashboardState(t *testing.T, ctx context.Context, b *bundle.Bundle require.NoError(t, err) // Write fake state file. 
- testutil.WriteFile(t, ` + testutil.WriteFile(t, filepath.Join(tfDir, TerraformStateFileName), ` { "version": 4, "terraform_version": "1.5.5", @@ -187,5 +187,5 @@ func writeFakeDashboardState(t *testing.T, ctx context.Context, b *bundle.Bundle } ] } - `, filepath.Join(tfDir, TerraformStateFileName)) + `) } diff --git a/bundle/deploy/terraform/check_running_resources.go b/bundle/deploy/terraform/check_running_resources.go index 737f773e5..5b3a70408 100644 --- a/bundle/deploy/terraform/check_running_resources.go +++ b/bundle/deploy/terraform/check_running_resources.go @@ -23,8 +23,7 @@ func (e ErrResourceIsRunning) Error() string { return fmt.Sprintf("%s %s is running", e.resourceType, e.resourceId) } -type checkRunningResources struct { -} +type checkRunningResources struct{} func (l *checkRunningResources) Name() string { return "check-running-resources" diff --git a/bundle/deploy/terraform/convert_test.go b/bundle/deploy/terraform/convert_test.go index 076d9b7a0..61f26c088 100644 --- a/bundle/deploy/terraform/convert_test.go +++ b/bundle/deploy/terraform/convert_test.go @@ -43,7 +43,7 @@ func convertToResourceStruct[T any](t *testing.T, resource *T, data any) { } func TestBundleToTerraformJob(t *testing.T) { - var src = resources.Job{ + src := resources.Job{ JobSettings: &jobs.JobSettings{ Name: "my job", JobClusters: []jobs.JobCluster{ @@ -71,7 +71,7 @@ func TestBundleToTerraformJob(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ "my_job": &src, @@ -93,7 +93,7 @@ func TestBundleToTerraformJob(t *testing.T) { } func TestBundleToTerraformJobPermissions(t *testing.T) { - var src = resources.Job{ + src := resources.Job{ Permissions: []resources.Permission{ { Level: "CAN_VIEW", @@ -102,7 +102,7 @@ func TestBundleToTerraformJobPermissions(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ "my_job": &src, @@ -121,7 +121,7 @@ func TestBundleToTerraformJobPermissions(t *testing.T) { } func TestBundleToTerraformJobTaskLibraries(t *testing.T) { - var src = resources.Job{ + src := resources.Job{ JobSettings: &jobs.JobSettings{ Name: "my job", Tasks: []jobs.Task{ @@ -139,7 +139,7 @@ func TestBundleToTerraformJobTaskLibraries(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ "my_job": &src, @@ -158,7 +158,7 @@ func TestBundleToTerraformJobTaskLibraries(t *testing.T) { } func TestBundleToTerraformForEachTaskLibraries(t *testing.T) { - var src = resources.Job{ + src := resources.Job{ JobSettings: &jobs.JobSettings{ Name: "my job", Tasks: []jobs.Task{ @@ -182,7 +182,7 @@ func TestBundleToTerraformForEachTaskLibraries(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ "my_job": &src, @@ -201,7 +201,7 @@ func TestBundleToTerraformForEachTaskLibraries(t *testing.T) { } func TestBundleToTerraformPipeline(t *testing.T) { - var src = resources.Pipeline{ + src := resources.Pipeline{ PipelineSpec: &pipelines.PipelineSpec{ Name: "my pipeline", Libraries: []pipelines.PipelineLibrary{ @@ -239,7 +239,7 @@ func TestBundleToTerraformPipeline(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ "my_pipeline": &src, @@ -262,7 +262,7 @@ func TestBundleToTerraformPipeline(t *testing.T) { } 
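Most hunks in convert_test.go (and in the tfdyn convert_*_test.go files later in this patch) apply one mechanical rewrite: an explicit `var x = ...` declaration with an initializer becomes a short variable declaration. A self-contained sketch of the two forms, with a trimmed-down struct standing in for resources.Job:

package main

import "fmt"

// job is a stand-in for the resources.Job literals used in these tests.
type job struct{ Name string }

func main() {
	// Before: explicit var with an initializer, the style being removed.
	var before = job{Name: "my job"}

	// After: the equivalent short variable declaration, the style being introduced.
	after := job{Name: "my job"}

	fmt.Println(before == after) // true; the two forms behave identically
}
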
func TestBundleToTerraformPipelinePermissions(t *testing.T) { - var src = resources.Pipeline{ + src := resources.Pipeline{ Permissions: []resources.Permission{ { Level: "CAN_VIEW", @@ -271,7 +271,7 @@ func TestBundleToTerraformPipelinePermissions(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ "my_pipeline": &src, @@ -290,7 +290,7 @@ func TestBundleToTerraformPipelinePermissions(t *testing.T) { } func TestBundleToTerraformModel(t *testing.T) { - var src = resources.MlflowModel{ + src := resources.MlflowModel{ Model: &ml.Model{ Name: "name", Description: "description", @@ -307,7 +307,7 @@ func TestBundleToTerraformModel(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Models: map[string]*resources.MlflowModel{ "my_model": &src, @@ -330,7 +330,7 @@ func TestBundleToTerraformModel(t *testing.T) { } func TestBundleToTerraformModelPermissions(t *testing.T) { - var src = resources.MlflowModel{ + src := resources.MlflowModel{ Model: &ml.Model{ Name: "name", }, @@ -342,7 +342,7 @@ func TestBundleToTerraformModelPermissions(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Models: map[string]*resources.MlflowModel{ "my_model": &src, @@ -361,13 +361,13 @@ func TestBundleToTerraformModelPermissions(t *testing.T) { } func TestBundleToTerraformExperiment(t *testing.T) { - var src = resources.MlflowExperiment{ + src := resources.MlflowExperiment{ Experiment: &ml.Experiment{ Name: "name", }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Experiments: map[string]*resources.MlflowExperiment{ "my_experiment": &src, @@ -384,7 +384,7 @@ func TestBundleToTerraformExperiment(t *testing.T) { } func TestBundleToTerraformExperimentPermissions(t *testing.T) { - var src = resources.MlflowExperiment{ + src := resources.MlflowExperiment{ Experiment: &ml.Experiment{ Name: "name", }, @@ -396,7 +396,7 @@ func TestBundleToTerraformExperimentPermissions(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Experiments: map[string]*resources.MlflowExperiment{ "my_experiment": &src, @@ -415,7 +415,7 @@ func TestBundleToTerraformExperimentPermissions(t *testing.T) { } func TestBundleToTerraformModelServing(t *testing.T) { - var src = resources.ModelServingEndpoint{ + src := resources.ModelServingEndpoint{ CreateServingEndpoint: &serving.CreateServingEndpoint{ Name: "name", Config: serving.EndpointCoreConfigInput{ @@ -439,7 +439,7 @@ func TestBundleToTerraformModelServing(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{ "my_model_serving_endpoint": &src, @@ -462,7 +462,7 @@ func TestBundleToTerraformModelServing(t *testing.T) { } func TestBundleToTerraformModelServingPermissions(t *testing.T) { - var src = resources.ModelServingEndpoint{ + src := resources.ModelServingEndpoint{ CreateServingEndpoint: &serving.CreateServingEndpoint{ Name: "name", @@ -492,7 +492,7 @@ func TestBundleToTerraformModelServingPermissions(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{ "my_model_serving_endpoint": &src, @@ -511,7 +511,7 @@ func TestBundleToTerraformModelServingPermissions(t *testing.T) { } func 
TestBundleToTerraformRegisteredModel(t *testing.T) { - var src = resources.RegisteredModel{ + src := resources.RegisteredModel{ CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{ Name: "name", CatalogName: "catalog", @@ -520,7 +520,7 @@ func TestBundleToTerraformRegisteredModel(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ RegisteredModels: map[string]*resources.RegisteredModel{ "my_registered_model": &src, @@ -540,7 +540,7 @@ func TestBundleToTerraformRegisteredModel(t *testing.T) { } func TestBundleToTerraformRegisteredModelGrants(t *testing.T) { - var src = resources.RegisteredModel{ + src := resources.RegisteredModel{ CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{ Name: "name", CatalogName: "catalog", @@ -554,7 +554,7 @@ func TestBundleToTerraformRegisteredModelGrants(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ RegisteredModels: map[string]*resources.RegisteredModel{ "my_registered_model": &src, @@ -573,14 +573,14 @@ func TestBundleToTerraformRegisteredModelGrants(t *testing.T) { } func TestBundleToTerraformDeletedResources(t *testing.T) { - var job1 = resources.Job{ + job1 := resources.Job{ JobSettings: &jobs.JobSettings{}, } - var job2 = resources.Job{ + job2 := resources.Job{ ModifiedStatus: resources.ModifiedStatusDeleted, JobSettings: &jobs.JobSettings{}, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ "my_job1": &job1, @@ -601,10 +601,10 @@ func TestBundleToTerraformDeletedResources(t *testing.T) { } func TestTerraformToBundleEmptyLocalResources(t *testing.T) { - var config = config.Root{ + config := config.Root{ Resources: config.Resources{}, } - var tfState = resourcesState{ + tfState := resourcesState{ Resources: []stateResource{ { Type: "databricks_job", @@ -736,7 +736,7 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) { } func TestTerraformToBundleEmptyRemoteResources(t *testing.T) { - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ "test_job": { @@ -817,7 +817,7 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) { }, }, } - var tfState = resourcesState{ + tfState := resourcesState{ Resources: nil, } err := TerraformToBundle(&tfState, &config) @@ -860,7 +860,7 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) { } func TestTerraformToBundleModifiedResources(t *testing.T) { - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ "test_job": { @@ -996,7 +996,7 @@ func TestTerraformToBundleModifiedResources(t *testing.T) { }, }, } - var tfState = resourcesState{ + tfState := resourcesState{ Resources: []stateResource{ { Type: "databricks_job", diff --git a/bundle/deploy/terraform/init.go b/bundle/deploy/terraform/init.go index 7d75ee8a8..366f0be8c 100644 --- a/bundle/deploy/terraform/init.go +++ b/bundle/deploy/terraform/init.go @@ -145,7 +145,7 @@ func inheritEnvVars(ctx context.Context, environ map[string]string) error { // This function is used for env vars set by the Databricks VSCode extension. The variables are intended to be used by the CLI // bundled with the Databricks VSCode extension, but users can use different CLI versions in the VSCode terminals, in which case we want to ignore // the variables if that CLI uses different versions of the dependencies. 
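An illustrative aside, not part of the diff: the behaviour described in the comment above can be sketched as below. Only the idea is taken from the patch; the helper here is a simplified re-implementation (the real getEnvVarWithMatchingVersion also reports errors and reads through the bundle env package), and the environment variable names mirror the DATABRICKS_TF_* constants added in pkg.go.

package main

import (
	"fmt"
	"os"
)

// envWithMatchingVersion returns the value of envVar only when the companion
// versionVar matches the version this CLI build expects; otherwise the override
// is ignored. Simplified sketch of the idea, not the CLI's actual helper.
func envWithMatchingVersion(envVar, versionVar, currentVersion string) string {
	value := os.Getenv(envVar)
	if value == "" || os.Getenv(versionVar) != currentVersion {
		return ""
	}
	return value
}

func main() {
	// Variable names mirror TerraformExecPathEnv / TerraformVersionEnv from pkg.go;
	// "1.5.5" mirrors the pinned TerraformVersion.
	execPath := envWithMatchingVersion("DATABRICKS_TF_EXEC_PATH", "DATABRICKS_TF_VERSION", "1.5.5")
	fmt.Println("terraform exec path override:", execPath)
}
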
-func getEnvVarWithMatchingVersion(ctx context.Context, envVarName string, versionVarName string, currentVersion string) (string, error) { +func getEnvVarWithMatchingVersion(ctx context.Context, envVarName, versionVarName, currentVersion string) (string, error) { envValue := env.Get(ctx, envVarName) versionValue := env.Get(ctx, versionVarName) diff --git a/bundle/deploy/terraform/init_test.go b/bundle/deploy/terraform/init_test.go index e3621c6c3..a1ffc5a1a 100644 --- a/bundle/deploy/terraform/init_test.go +++ b/bundle/deploy/terraform/init_test.go @@ -400,7 +400,7 @@ func TestFindExecPathFromEnvironmentWithCorrectVersionAndBinary(t *testing.T) { require.Equal(t, tmpBinPath, b.Config.Bundle.Terraform.ExecPath) } -func createTempFile(t *testing.T, dest string, name string, executable bool) string { +func createTempFile(t *testing.T, dest, name string, executable bool) string { binPath := filepath.Join(dest, name) f, err := os.Create(binPath) require.NoError(t, err) @@ -409,7 +409,7 @@ func createTempFile(t *testing.T, dest string, name string, executable bool) str require.NoError(t, err) }() if executable { - err = f.Chmod(0777) + err = f.Chmod(0o777) require.NoError(t, err) } return binPath @@ -422,7 +422,7 @@ func TestGetEnvVarWithMatchingVersion(t *testing.T) { tmp := t.TempDir() file := testutil.Touch(t, tmp, "bar") - var tc = []struct { + tc := []struct { envValue string versionValue string currentVersion string diff --git a/bundle/deploy/terraform/interpolate.go b/bundle/deploy/terraform/interpolate.go index 9c2126aec..813e6bbb7 100644 --- a/bundle/deploy/terraform/interpolate.go +++ b/bundle/deploy/terraform/interpolate.go @@ -10,8 +10,7 @@ import ( "github.com/databricks/cli/libs/dyn/dynvar" ) -type interpolateMutator struct { -} +type interpolateMutator struct{} func Interpolate() bundle.Mutator { return &interpolateMutator{} diff --git a/bundle/deploy/terraform/pkg.go b/bundle/deploy/terraform/pkg.go index cad754024..bd636639f 100644 --- a/bundle/deploy/terraform/pkg.go +++ b/bundle/deploy/terraform/pkg.go @@ -5,15 +5,19 @@ import ( "github.com/hashicorp/go-version" ) -const TerraformStateFileName = "terraform.tfstate" -const TerraformConfigFileName = "bundle.tf.json" +const ( + TerraformStateFileName = "terraform.tfstate" + TerraformConfigFileName = "bundle.tf.json" +) // Users can provide their own terraform binary and databricks terraform provider by setting the following environment variables. // This allows users to use the CLI in an air-gapped environments. See the `debug terraform` command. -const TerraformExecPathEnv = "DATABRICKS_TF_EXEC_PATH" -const TerraformVersionEnv = "DATABRICKS_TF_VERSION" -const TerraformCliConfigPathEnv = "DATABRICKS_TF_CLI_CONFIG_FILE" -const TerraformProviderVersionEnv = "DATABRICKS_TF_PROVIDER_VERSION" +const ( + TerraformExecPathEnv = "DATABRICKS_TF_EXEC_PATH" + TerraformVersionEnv = "DATABRICKS_TF_VERSION" + TerraformCliConfigPathEnv = "DATABRICKS_TF_CLI_CONFIG_FILE" + TerraformProviderVersionEnv = "DATABRICKS_TF_PROVIDER_VERSION" +) // Terraform CLI version to use and the corresponding checksums for it. The // checksums are used to verify the integrity of the downloaded binary. Please @@ -26,8 +30,10 @@ const TerraformProviderVersionEnv = "DATABRICKS_TF_PROVIDER_VERSION" // downloaded Terraform archive. 
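Another aside: the pinned SHA-256 values below are used for integrity checks of the downloaded Terraform archive. A generic, self-contained sketch of such a check follows; it is not the CLI's download code, and the archive path is made up.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

// Value copied from checksumLinuxAmd64 in this patch.
const expectedSHA256 = "ad0c696c870c8525357b5127680cd79c0bdf58179af9acd091d43b1d6482da4a"

func main() {
	// Illustrative path; the real code decides where the archive is downloaded.
	f, err := os.Open("terraform_1.5.5_linux_amd64.zip")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		panic(err)
	}
	if got := hex.EncodeToString(h.Sum(nil)); got != expectedSHA256 {
		panic(fmt.Sprintf("checksum mismatch: got %s", got))
	}
	fmt.Println("archive checksum verified")
}
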
var TerraformVersion = version.Must(version.NewVersion("1.5.5")) -const checksumLinuxArm64 = "b055aefe343d0b710d8a7afd31aeb702b37bbf4493bb9385a709991e48dfbcd2" -const checksumLinuxAmd64 = "ad0c696c870c8525357b5127680cd79c0bdf58179af9acd091d43b1d6482da4a" +const ( + checksumLinuxArm64 = "b055aefe343d0b710d8a7afd31aeb702b37bbf4493bb9385a709991e48dfbcd2" + checksumLinuxAmd64 = "ad0c696c870c8525357b5127680cd79c0bdf58179af9acd091d43b1d6482da4a" +) type Checksum struct { LinuxArm64 string `json:"linux_arm64"` diff --git a/bundle/deploy/terraform/pkg_test.go b/bundle/deploy/terraform/pkg_test.go index b8dcb9e08..08ec3de75 100644 --- a/bundle/deploy/terraform/pkg_test.go +++ b/bundle/deploy/terraform/pkg_test.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/require" ) -func downloadAndChecksum(t *testing.T, url string, expectedChecksum string) { +func downloadAndChecksum(t *testing.T, url, expectedChecksum string) { resp, err := http.Get(url) require.NoError(t, err) defer resp.Body.Close() diff --git a/bundle/deploy/terraform/plan.go b/bundle/deploy/terraform/plan.go index 72f0b49a8..7f7473efa 100644 --- a/bundle/deploy/terraform/plan.go +++ b/bundle/deploy/terraform/plan.go @@ -2,7 +2,6 @@ package terraform import ( "context" - "fmt" "path/filepath" "github.com/databricks/cli/bundle" @@ -57,7 +56,7 @@ func (p *plan) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { IsEmpty: !notEmpty, } - log.Debugf(ctx, fmt.Sprintf("Planning complete and persisted at %s\n", planPath)) + log.Debugf(ctx, "Planning complete and persisted at %s\n", planPath) return nil } diff --git a/bundle/deploy/terraform/state_pull.go b/bundle/deploy/terraform/state_pull.go index 9a5b91007..4e1e2b1c5 100644 --- a/bundle/deploy/terraform/state_pull.go +++ b/bundle/deploy/terraform/state_pull.go @@ -104,7 +104,7 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic localState, err := l.localState(ctx, b) if errors.Is(err, fs.ErrNotExist) { log.Infof(ctx, "Local state file does not exist. Using remote Terraform state.") - err := os.WriteFile(localStatePath, remoteContent, 0600) + err := os.WriteFile(localStatePath, remoteContent, 0o600) return diag.FromErr(err) } if err != nil { @@ -114,14 +114,14 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic // If the lineage does not match, the Terraform state files do not correspond to the same deployment. if localState.Lineage != remoteState.Lineage { log.Infof(ctx, "Remote and local state lineages do not match. Using remote Terraform state. Invalidating local Terraform state.") - err := os.WriteFile(localStatePath, remoteContent, 0600) + err := os.WriteFile(localStatePath, remoteContent, 0o600) return diag.FromErr(err) } // If the remote state is newer than the local state, we should use the remote state. if remoteState.Serial > localState.Serial { log.Infof(ctx, "Remote state is newer than local state. 
Using remote Terraform state.") - err := os.WriteFile(localStatePath, remoteContent, 0600) + err := os.WriteFile(localStatePath, remoteContent, 0o600) return diag.FromErr(err) } diff --git a/bundle/deploy/terraform/tfdyn/convert_cluster_test.go b/bundle/deploy/terraform/tfdyn/convert_cluster_test.go index e6d2620c6..330720a7c 100644 --- a/bundle/deploy/terraform/tfdyn/convert_cluster_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_cluster_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertCluster(t *testing.T) { - var src = resources.Cluster{ + src := resources.Cluster{ ClusterSpec: &compute.ClusterSpec{ NumWorkers: 3, SparkVersion: "13.3.x-scala2.12", @@ -93,5 +93,4 @@ func TestConvertCluster(t *testing.T) { }, }, }, out.Permissions["cluster_my_cluster"]) - } diff --git a/bundle/deploy/terraform/tfdyn/convert_dashboard.go b/bundle/deploy/terraform/tfdyn/convert_dashboard.go index 3ba7e19a2..3ec8b489f 100644 --- a/bundle/deploy/terraform/tfdyn/convert_dashboard.go +++ b/bundle/deploy/terraform/tfdyn/convert_dashboard.go @@ -17,7 +17,7 @@ const ( ) // Marshal "serialized_dashboard" as JSON if it is set in the input but not in the output. -func marshalSerializedDashboard(vin dyn.Value, vout dyn.Value) (dyn.Value, error) { +func marshalSerializedDashboard(vin, vout dyn.Value) (dyn.Value, error) { // Skip if the "serialized_dashboard" field is already set. if v := vout.Get(serializedDashboardFieldName); v.IsValid() { return vout, nil diff --git a/bundle/deploy/terraform/tfdyn/convert_dashboard_test.go b/bundle/deploy/terraform/tfdyn/convert_dashboard_test.go index 539ba21aa..6f5d36504 100644 --- a/bundle/deploy/terraform/tfdyn/convert_dashboard_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_dashboard_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertDashboard(t *testing.T) { - var src = resources.Dashboard{ + src := resources.Dashboard{ Dashboard: &dashboards.Dashboard{ DisplayName: "my dashboard", WarehouseId: "f00dcafe", @@ -60,7 +60,7 @@ func TestConvertDashboard(t *testing.T) { } func TestConvertDashboardFilePath(t *testing.T) { - var src = resources.Dashboard{ + src := resources.Dashboard{ FilePath: "some/path", } @@ -84,7 +84,7 @@ func TestConvertDashboardFilePath(t *testing.T) { } func TestConvertDashboardFilePathQuoted(t *testing.T) { - var src = resources.Dashboard{ + src := resources.Dashboard{ FilePath: `C:\foo\bar\baz\dashboard.lvdash.json`, } @@ -108,7 +108,7 @@ func TestConvertDashboardFilePathQuoted(t *testing.T) { } func TestConvertDashboardSerializedDashboardString(t *testing.T) { - var src = resources.Dashboard{ + src := resources.Dashboard{ SerializedDashboard: `{ "json": true }`, } @@ -127,7 +127,7 @@ func TestConvertDashboardSerializedDashboardString(t *testing.T) { } func TestConvertDashboardSerializedDashboardAny(t *testing.T) { - var src = resources.Dashboard{ + src := resources.Dashboard{ SerializedDashboard: map[string]any{ "pages": []map[string]any{ { diff --git a/bundle/deploy/terraform/tfdyn/convert_experiment_test.go b/bundle/deploy/terraform/tfdyn/convert_experiment_test.go index 63add4368..3ef3963f2 100644 --- a/bundle/deploy/terraform/tfdyn/convert_experiment_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_experiment_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertExperiment(t *testing.T) { - var src = resources.MlflowExperiment{ + src := resources.MlflowExperiment{ Experiment: &ml.Experiment{ Name: "name", }, diff --git a/bundle/deploy/terraform/tfdyn/convert_grants_test.go b/bundle/deploy/terraform/tfdyn/convert_grants_test.go index 
a486bc36f..0a263b493 100644 --- a/bundle/deploy/terraform/tfdyn/convert_grants_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_grants_test.go @@ -13,7 +13,7 @@ import ( ) func TestConvertGrants(t *testing.T) { - var src = resources.RegisteredModel{ + src := resources.RegisteredModel{ Grants: []resources.Grant{ { Privileges: []string{"EXECUTE", "FOO"}, @@ -45,7 +45,7 @@ func TestConvertGrants(t *testing.T) { } func TestConvertGrantsNil(t *testing.T) { - var src = resources.RegisteredModel{ + src := resources.RegisteredModel{ Grants: nil, } @@ -58,7 +58,7 @@ func TestConvertGrantsNil(t *testing.T) { } func TestConvertGrantsEmpty(t *testing.T) { - var src = resources.RegisteredModel{ + src := resources.RegisteredModel{ Grants: []resources.Grant{}, } diff --git a/bundle/deploy/terraform/tfdyn/convert_job.go b/bundle/deploy/terraform/tfdyn/convert_job.go index 8948e3baf..bb2f8cd0f 100644 --- a/bundle/deploy/terraform/tfdyn/convert_job.go +++ b/bundle/deploy/terraform/tfdyn/convert_job.go @@ -83,7 +83,6 @@ func convertJobResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { "libraries": "library", }) }) - if err != nil { return dyn.InvalidValue, err } diff --git a/bundle/deploy/terraform/tfdyn/convert_job_test.go b/bundle/deploy/terraform/tfdyn/convert_job_test.go index 695b9ba24..c73e530d4 100644 --- a/bundle/deploy/terraform/tfdyn/convert_job_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_job_test.go @@ -15,7 +15,7 @@ import ( ) func TestConvertJob(t *testing.T) { - var src = resources.Job{ + src := resources.Job{ JobSettings: &jobs.JobSettings{ Name: "my job", JobClusters: []jobs.JobCluster{ diff --git a/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint_test.go b/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint_test.go index 63b75e9ab..d46350bb7 100644 --- a/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertModelServingEndpoint(t *testing.T) { - var src = resources.ModelServingEndpoint{ + src := resources.ModelServingEndpoint{ CreateServingEndpoint: &serving.CreateServingEndpoint{ Name: "name", Config: serving.EndpointCoreConfigInput{ diff --git a/bundle/deploy/terraform/tfdyn/convert_model_test.go b/bundle/deploy/terraform/tfdyn/convert_model_test.go index 542caa878..4c4e62c5b 100644 --- a/bundle/deploy/terraform/tfdyn/convert_model_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_model_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertModel(t *testing.T) { - var src = resources.MlflowModel{ + src := resources.MlflowModel{ Model: &ml.Model{ Name: "name", Description: "description", diff --git a/bundle/deploy/terraform/tfdyn/convert_permissions_test.go b/bundle/deploy/terraform/tfdyn/convert_permissions_test.go index ba389020f..ba04384b5 100644 --- a/bundle/deploy/terraform/tfdyn/convert_permissions_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_permissions_test.go @@ -13,7 +13,7 @@ import ( ) func TestConvertPermissions(t *testing.T) { - var src = resources.Job{ + src := resources.Job{ Permissions: []resources.Permission{ { Level: "CAN_VIEW", @@ -59,7 +59,7 @@ func TestConvertPermissions(t *testing.T) { } func TestConvertPermissionsNil(t *testing.T) { - var src = resources.Job{ + src := resources.Job{ Permissions: nil, } @@ -72,7 +72,7 @@ func TestConvertPermissionsNil(t *testing.T) { } func TestConvertPermissionsEmpty(t *testing.T) { - var src = resources.Job{ + src := resources.Job{ Permissions: 
[]resources.Permission{}, } diff --git a/bundle/deploy/terraform/tfdyn/convert_pipeline_test.go b/bundle/deploy/terraform/tfdyn/convert_pipeline_test.go index 7010d463a..0239bad18 100644 --- a/bundle/deploy/terraform/tfdyn/convert_pipeline_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_pipeline_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertPipeline(t *testing.T) { - var src = resources.Pipeline{ + src := resources.Pipeline{ PipelineSpec: &pipelines.PipelineSpec{ Name: "my pipeline", Libraries: []pipelines.PipelineLibrary{ diff --git a/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go b/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go index f71abf43c..16b30de71 100644 --- a/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertQualityMonitor(t *testing.T) { - var src = resources.QualityMonitor{ + src := resources.QualityMonitor{ TableName: "test_table_name", CreateMonitor: &catalog.CreateMonitor{ AssetsDir: "assets_dir", diff --git a/bundle/deploy/terraform/tfdyn/convert_registered_model_test.go b/bundle/deploy/terraform/tfdyn/convert_registered_model_test.go index 77096e8d0..bf2a5ab64 100644 --- a/bundle/deploy/terraform/tfdyn/convert_registered_model_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_registered_model_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertRegisteredModel(t *testing.T) { - var src = resources.RegisteredModel{ + src := resources.RegisteredModel{ CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{ Name: "name", CatalogName: "catalog", diff --git a/bundle/deploy/terraform/tfdyn/convert_schema_test.go b/bundle/deploy/terraform/tfdyn/convert_schema_test.go index 2efbf3e43..12822bb3c 100644 --- a/bundle/deploy/terraform/tfdyn/convert_schema_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_schema_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertSchema(t *testing.T) { - var src = resources.Schema{ + src := resources.Schema{ CreateSchema: &catalog.CreateSchema{ Name: "name", CatalogName: "catalog", diff --git a/bundle/deploy/terraform/tfdyn/convert_volume_test.go b/bundle/deploy/terraform/tfdyn/convert_volume_test.go index c897ae69a..09b69489e 100644 --- a/bundle/deploy/terraform/tfdyn/convert_volume_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_volume_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertVolume(t *testing.T) { - var src = resources.Volume{ + src := resources.Volume{ CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{ CatalogName: "catalog", Comment: "comment", diff --git a/bundle/deploy/terraform/tfdyn/rename_keys.go b/bundle/deploy/terraform/tfdyn/rename_keys.go index 650ffb890..95904575f 100644 --- a/bundle/deploy/terraform/tfdyn/rename_keys.go +++ b/bundle/deploy/terraform/tfdyn/rename_keys.go @@ -11,7 +11,7 @@ import ( // definition uses the plural name. This function can convert between the two. func renameKeys(v dyn.Value, rename map[string]string) (dyn.Value, error) { var err error - var acc = dyn.V(map[string]dyn.Value{}) + acc := dyn.V(map[string]dyn.Value{}) nv, err := dyn.Walk(v, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { if len(p) == 0 { @@ -36,7 +36,6 @@ func renameKeys(v dyn.Value, rename map[string]string) (dyn.Value, error) { // Pass through all other values. 
return v, dyn.ErrSkip }) - if err != nil { return dyn.InvalidValue, err } diff --git a/bundle/deploy/terraform/unbind.go b/bundle/deploy/terraform/unbind.go index 49d65615e..494cb7ef1 100644 --- a/bundle/deploy/terraform/unbind.go +++ b/bundle/deploy/terraform/unbind.go @@ -37,6 +37,6 @@ func (*unbind) Name() string { return "terraform.Unbind" } -func Unbind(resourceType string, resourceKey string) bundle.Mutator { +func Unbind(resourceType, resourceKey string) bundle.Mutator { return &unbind{resourceType: resourceType, resourceKey: resourceKey} } diff --git a/bundle/internal/bundletest/location.go b/bundle/internal/bundletest/location.go index 2ffd621bf..5dcd9d78f 100644 --- a/bundle/internal/bundletest/location.go +++ b/bundle/internal/bundletest/location.go @@ -10,7 +10,7 @@ import ( // with the path it is loaded from. func SetLocation(b *bundle.Bundle, prefix string, locations []dyn.Location) { start := dyn.MustPathFromString(prefix) - b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) { + err := b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) { return dyn.Walk(root, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { // If the path has the given prefix, set the location. if p.HasPrefix(start) { @@ -27,4 +27,7 @@ func SetLocation(b *bundle.Bundle, prefix string, locations []dyn.Location) { return v, dyn.ErrSkip }) }) + if err != nil { + panic("Mutate() failed: " + err.Error()) + } } diff --git a/bundle/internal/schema/annotations.go b/bundle/internal/schema/annotations.go new file mode 100644 index 000000000..91aaa4555 --- /dev/null +++ b/bundle/internal/schema/annotations.go @@ -0,0 +1,257 @@ +package main + +import ( + "bytes" + "fmt" + "os" + "reflect" + "regexp" + "slices" + "strings" + + yaml3 "gopkg.in/yaml.v3" + + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/dyn/merge" + "github.com/databricks/cli/libs/dyn/yamlloader" + "github.com/databricks/cli/libs/dyn/yamlsaver" + "github.com/databricks/cli/libs/jsonschema" +) + +type annotation struct { + Description string `json:"description,omitempty"` + MarkdownDescription string `json:"markdown_description,omitempty"` + Title string `json:"title,omitempty"` + Default any `json:"default,omitempty"` + Enum []any `json:"enum,omitempty"` +} + +type annotationHandler struct { + // Annotations read from all annotation files including all overrides + parsedAnnotations annotationFile + // Missing annotations for fields that are found in config that need to be added to the annotation file + missingAnnotations annotationFile +} + +/** + * Parsed file with annotations, expected format: + * github.com/databricks/cli/bundle/config.Bundle: + * cluster_id: + * description: "Description" + */ +type annotationFile map[string]map[string]annotation + +const Placeholder = "PLACEHOLDER" + +// Adds annotations to the JSON schema reading from the annotation files. 
+// More details https://json-schema.org/understanding-json-schema/reference/annotations +func newAnnotationHandler(sources []string) (*annotationHandler, error) { + prev := dyn.NilValue + for _, path := range sources { + b, err := os.ReadFile(path) + if err != nil { + return nil, err + } + generated, err := yamlloader.LoadYAML(path, bytes.NewBuffer(b)) + if err != nil { + return nil, err + } + prev, err = merge.Merge(prev, generated) + if err != nil { + return nil, err + } + } + + var data annotationFile + + err := convert.ToTyped(&data, prev) + if err != nil { + return nil, err + } + + d := &annotationHandler{} + d.parsedAnnotations = data + d.missingAnnotations = annotationFile{} + return d, nil +} + +func (d *annotationHandler) addAnnotations(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema { + refPath := getPath(typ) + shouldHandle := strings.HasPrefix(refPath, "github.com") + if !shouldHandle { + return s + } + + annotations := d.parsedAnnotations[refPath] + if annotations == nil { + annotations = map[string]annotation{} + } + + rootTypeAnnotation, ok := annotations[RootTypeKey] + if ok { + assignAnnotation(&s, rootTypeAnnotation) + } + + for k, v := range s.Properties { + item := annotations[k] + if item.Description == "" { + item.Description = Placeholder + + emptyAnnotations := d.missingAnnotations[refPath] + if emptyAnnotations == nil { + emptyAnnotations = map[string]annotation{} + d.missingAnnotations[refPath] = emptyAnnotations + } + emptyAnnotations[k] = item + } + assignAnnotation(v, item) + } + return s +} + +// Writes missing annotations with placeholder values back to the annotation file +func (d *annotationHandler) syncWithMissingAnnotations(outputPath string) error { + existingFile, err := os.ReadFile(outputPath) + if err != nil { + return err + } + existing, err := yamlloader.LoadYAML("", bytes.NewBuffer(existingFile)) + if err != nil { + return err + } + + for k := range d.missingAnnotations { + if !isCliPath(k) { + delete(d.missingAnnotations, k) + fmt.Printf("Missing annotations for `%s` that are not in CLI package, try to fetch latest OpenAPI spec and regenerate annotations", k) + } + } + + missingAnnotations, err := convert.FromTyped(d.missingAnnotations, dyn.NilValue) + if err != nil { + return err + } + + output, err := merge.Merge(existing, missingAnnotations) + if err != nil { + return err + } + + var outputTyped annotationFile + err = convert.ToTyped(&outputTyped, output) + if err != nil { + return err + } + + err = saveYamlWithStyle(outputPath, outputTyped) + if err != nil { + return err + } + return nil +} + +func getPath(typ reflect.Type) string { + return typ.PkgPath() + "." 
+ typ.Name() +} + +func assignAnnotation(s *jsonschema.Schema, a annotation) { + if a.Description != Placeholder { + s.Description = a.Description + } + + if a.Default != nil { + s.Default = a.Default + } + s.MarkdownDescription = convertLinksToAbsoluteUrl(a.MarkdownDescription) + s.Title = a.Title + s.Enum = a.Enum +} + +func saveYamlWithStyle(outputPath string, annotations annotationFile) error { + annotationOrder := yamlsaver.NewOrder([]string{"description", "markdown_description", "title", "default", "enum"}) + style := map[string]yaml3.Style{} + + order := getAlphabeticalOrder(annotations) + dynMap := map[string]dyn.Value{} + for k, v := range annotations { + style[k] = yaml3.LiteralStyle + + properties := map[string]dyn.Value{} + propertiesOrder := getAlphabeticalOrder(v) + for key, value := range v { + d, err := convert.FromTyped(value, dyn.NilValue) + if d.Kind() == dyn.KindNil || err != nil { + properties[key] = dyn.NewValue(map[string]dyn.Value{}, []dyn.Location{{Line: propertiesOrder.Get(key)}}) + continue + } + val, err := yamlsaver.ConvertToMapValue(value, annotationOrder, []string{}, map[string]dyn.Value{}) + if err != nil { + return err + } + properties[key] = val.WithLocations([]dyn.Location{{Line: propertiesOrder.Get(key)}}) + } + + dynMap[k] = dyn.NewValue(properties, []dyn.Location{{Line: order.Get(k)}}) + } + + saver := yamlsaver.NewSaverWithStyle(style) + err := saver.SaveAsYAML(dynMap, outputPath, true) + if err != nil { + return err + } + return nil +} + +func getAlphabeticalOrder[T any](mapping map[string]T) *yamlsaver.Order { + order := []string{} + for k := range mapping { + order = append(order, k) + } + slices.Sort(order) + return yamlsaver.NewOrder(order) +} + +func convertLinksToAbsoluteUrl(s string) string { + if s == "" { + return s + } + base := "https://docs.databricks.com" + referencePage := "/dev-tools/bundles/reference.html" + + // Regular expression to match Markdown-style links like [_](link) + re := regexp.MustCompile(`\[_\]\(([^)]+)\)`) + result := re.ReplaceAllStringFunc(s, func(match string) string { + matches := re.FindStringSubmatch(match) + if len(matches) < 2 { + return match + } + link := matches[1] + var text, absoluteURL string + + if strings.HasPrefix(link, "#") { + text = strings.TrimPrefix(link, "#") + absoluteURL = fmt.Sprintf("%s%s%s", base, referencePage, link) + + // Handle relative paths like /dev-tools/bundles/resources.html#dashboard + } else if strings.HasPrefix(link, "/") { + absoluteURL = strings.ReplaceAll(fmt.Sprintf("%s%s", base, link), ".md", ".html") + if strings.Contains(link, "#") { + parts := strings.Split(link, "#") + text = parts[1] + } else { + text = "link" + } + } else { + return match + } + + return fmt.Sprintf("[%s](%s)", text, absoluteURL) + }) + + return result +} + +func isCliPath(path string) bool { + return !strings.HasPrefix(path, "github.com/databricks/databricks-sdk-go") +} diff --git a/bundle/internal/schema/annotations.yml b/bundle/internal/schema/annotations.yml new file mode 100644 index 000000000..84f6753e3 --- /dev/null +++ b/bundle/internal/schema/annotations.yml @@ -0,0 +1,440 @@ +github.com/databricks/cli/bundle/config.Artifact: + "build": + "description": |- + An optional set of non-default build commands that you want to run locally before deployment. + + For Python wheel builds, the Databricks CLI assumes that it can find a local install of the Python wheel package to run builds, and it runs the command python setup.py bdist_wheel by default during each bundle deployment. 
+ + To specify multiple build commands, separate each command with double-ampersand (&&) characters. + "executable": + "description": |- + The executable type. + "files": + "description": |- + The source files for the artifact. + "markdown_description": |- + The source files for the artifact, defined as an [_](#artifact_file). + "path": + "description": |- + The location where the built artifact will be saved. + "type": + "description": |- + The type of the artifact. + "markdown_description": |- + The type of the artifact. Valid values are `wheel` or `jar` +github.com/databricks/cli/bundle/config.ArtifactFile: + "source": + "description": |- + The path of the files used to build the artifact. +github.com/databricks/cli/bundle/config.Bundle: + "cluster_id": + "description": |- + The ID of a cluster to use to run the bundle. + "markdown_description": |- + The ID of a cluster to use to run the bundle. See [_](/dev-tools/bundles/settings.md#cluster_id). + "compute_id": + "description": |- + PLACEHOLDER + "databricks_cli_version": + "description": |- + The Databricks CLI version to use for the bundle. + "markdown_description": |- + The Databricks CLI version to use for the bundle. See [_](/dev-tools/bundles/settings.md#databricks_cli_version). + "deployment": + "description": |- + The definition of the bundle deployment + "markdown_description": |- + The definition of the bundle deployment. For supported attributes, see [_](#deployment) and [_](/dev-tools/bundles/deployment-modes.md). + "git": + "description": |- + The Git version control details that are associated with your bundle. + "markdown_description": |- + The Git version control details that are associated with your bundle. For supported attributes, see [_](#git) and [_](/dev-tools/bundles/settings.md#git). + "name": + "description": |- + The name of the bundle. + "uuid": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config.Deployment: + "fail_on_active_runs": + "description": |- + Whether to fail on active runs. If this is set to true a deployment that is running can be interrupted. + "lock": + "description": |- + The deployment lock attributes. + "markdown_description": |- + The deployment lock attributes. See [_](#lock). +github.com/databricks/cli/bundle/config.Experimental: + "pydabs": + "description": |- + The PyDABs configuration. + "python_wheel_wrapper": + "description": |- + Whether to use a Python wheel wrapper + "scripts": + "description": |- + The commands to run + "use_legacy_run_as": + "description": |- + Whether to use the legacy run_as behavior +github.com/databricks/cli/bundle/config.Git: + "branch": + "description": |- + The Git branch name. + "markdown_description": |- + The Git branch name. See [_](/dev-tools/bundles/settings.md#git). + "origin_url": + "description": |- + The origin URL of the repository. + "markdown_description": |- + The origin URL of the repository. See [_](/dev-tools/bundles/settings.md#git). +github.com/databricks/cli/bundle/config.Lock: + "enabled": + "description": |- + Whether this lock is enabled. + "force": + "description": |- + Whether to force this lock if it is enabled. +github.com/databricks/cli/bundle/config.Presets: + "jobs_max_concurrent_runs": + "description": |- + The maximum concurrent runs for a job. + "name_prefix": + "description": |- + The prefix for job runs of the bundle. + "pipelines_development": + "description": |- + Whether pipeline deployments should be locked in development mode. 
+ "source_linked_deployment": + "description": |- + Whether to link the deployment to the bundle source. + "tags": + "description": |- + The tags for the bundle deployment. + "trigger_pause_status": + "description": |- + A pause status to apply to all job triggers and schedules. Valid values are PAUSED or UNPAUSED. +github.com/databricks/cli/bundle/config.PyDABs: + "enabled": + "description": |- + Whether or not PyDABs (Private Preview) is enabled + "import": + "description": |- + The PyDABs project to import to discover resources, resource generator and mutators + "venv_path": + "description": |- + The Python virtual environment path +github.com/databricks/cli/bundle/config.Resources: + "clusters": + "description": |- + The cluster definitions for the bundle. + "markdown_description": |- + The cluster definitions for the bundle. See [_](/dev-tools/bundles/resources.md#cluster) + "dashboards": + "description": |- + The dashboard definitions for the bundle. + "markdown_description": |- + The dashboard definitions for the bundle. See [_](/dev-tools/bundles/resources.md#dashboard) + "experiments": + "description": |- + The experiment definitions for the bundle. + "markdown_description": |- + The experiment definitions for the bundle. See [_](/dev-tools/bundles/resources.md#experiment) + "jobs": + "description": |- + The job definitions for the bundle. + "markdown_description": |- + The job definitions for the bundle. See [_](/dev-tools/bundles/resources.md#job) + "model_serving_endpoints": + "description": |- + The model serving endpoint definitions for the bundle. + "markdown_description": |- + The model serving endpoint definitions for the bundle. See [_](/dev-tools/bundles/resources.md#model_serving_endpoint) + "models": + "description": |- + The model definitions for the bundle. + "markdown_description": |- + The model definitions for the bundle. See [_](/dev-tools/bundles/resources.md#model) + "pipelines": + "description": |- + The pipeline definitions for the bundle. + "markdown_description": |- + The pipeline definitions for the bundle. See [_](/dev-tools/bundles/resources.md#pipeline) + "quality_monitors": + "description": |- + The quality monitor definitions for the bundle. + "markdown_description": |- + The quality monitor definitions for the bundle. See [_](/dev-tools/bundles/resources.md#quality_monitor) + "registered_models": + "description": |- + The registered model definitions for the bundle. + "markdown_description": |- + The registered model definitions for the bundle. See [_](/dev-tools/bundles/resources.md#registered_model) + "schemas": + "description": |- + The schema definitions for the bundle. + "markdown_description": |- + The schema definitions for the bundle. See [_](/dev-tools/bundles/resources.md#schema) + "volumes": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config.Root: + "artifacts": + "description": |- + Defines the attributes to build an artifact + "bundle": + "description": |- + The attributes of the bundle. + "markdown_description": |- + The attributes of the bundle. See [_](/dev-tools/bundles/settings.md#bundle) + "experimental": + "description": |- + Defines attributes for experimental features. + "include": + "description": |- + Specifies a list of path globs that contain configuration files to include within the bundle. + "markdown_description": |- + Specifies a list of path globs that contain configuration files to include within the bundle. 
See [_](/dev-tools/bundles/settings.md#include) + "permissions": + "description": |- + Defines the permissions to apply to experiments, jobs, pipelines, and models defined in the bundle + "markdown_description": |- + Defines the permissions to apply to experiments, jobs, pipelines, and models defined in the bundle. See [_](/dev-tools/bundles/settings.md#permissions) and [_](/dev-tools/bundles/permissions.md). + "presets": + "description": |- + Defines bundle deployment presets. + "markdown_description": |- + Defines bundle deployment presets. See [_](/dev-tools/bundles/deployment-modes.md#presets). + "resources": + "description": |- + Specifies information about the Databricks resources used by the bundle + "markdown_description": |- + Specifies information about the Databricks resources used by the bundle. See [_](/dev-tools/bundles/resources.md). + "run_as": + "description": |- + The identity to use to run the bundle. + "sync": + "description": |- + The files and file paths to include or exclude in the bundle. + "markdown_description": |- + The files and file paths to include or exclude in the bundle. See [_](/dev-tools/bundles/) + "targets": + "description": |- + Defines deployment targets for the bundle. + "variables": + "description": |- + A Map that defines the custom variables for the bundle, where each key is the name of the variable, and the value is a Map that defines the variable. + "workspace": + "description": |- + Defines the Databricks workspace for the bundle. +github.com/databricks/cli/bundle/config.Sync: + "exclude": + "description": |- + A list of files or folders to exclude from the bundle. + "include": + "description": |- + A list of files or folders to include in the bundle. + "paths": + "description": |- + The local folder paths, which can be outside the bundle root, to synchronize to the workspace when the bundle is deployed. +github.com/databricks/cli/bundle/config.Target: + "artifacts": + "description": |- + The artifacts to include in the target deployment. + "markdown_description": |- + The artifacts to include in the target deployment. See [_](#artifact) + "bundle": + "description": |- + The name of the bundle when deploying to this target. + "cluster_id": + "description": |- + The ID of the cluster to use for this target. + "compute_id": + "description": |- + Deprecated. The ID of the compute to use for this target. + "default": + "description": |- + Whether this target is the default target. + "git": + "description": |- + The Git version control settings for the target. + "markdown_description": |- + The Git version control settings for the target. See [_](#git). + "mode": + "description": |- + The deployment mode for the target. + "markdown_description": |- + The deployment mode for the target. Valid values are `development` or `production`. See [_](/dev-tools/bundles/deployment-modes.md). + "permissions": + "description": |- + The permissions for deploying and running the bundle in the target. + "markdown_description": |- + The permissions for deploying and running the bundle in the target. See [_](#permission). + "presets": + "description": |- + The deployment presets for the target. + "markdown_description": |- + The deployment presets for the target. See [_](#preset). + "resources": + "description": |- + The resource definitions for the target. + "markdown_description": |- + The resource definitions for the target. See [_](#resources). + "run_as": + "description": |- + The identity to use to run the bundle. 
+ "markdown_description": |- + The identity to use to run the bundle. See [_](#job_run_as) and [_](/dev-tools/bundles/run_as.md). + "sync": + "description": |- + The local paths to sync to the target workspace when a bundle is run or deployed. + "markdown_description": |- + The local paths to sync to the target workspace when a bundle is run or deployed. See [_](#sync). + "variables": + "description": |- + The custom variable definitions for the target. + "markdown_description": |- + The custom variable definitions for the target. See [_](/dev-tools/bundles/settings.md#variables) and [_](/dev-tools/bundles/variables.md). + "workspace": + "description": |- + The Databricks workspace for the target. + "markdown_description": |- + The Databricks workspace for the target. [_](#workspace) +github.com/databricks/cli/bundle/config.Workspace: + "artifact_path": + "description": |- + The artifact path to use within the workspace for both deployments and workflow runs + "auth_type": + "description": |- + The authentication type. + "azure_client_id": + "description": |- + The Azure client ID + "azure_environment": + "description": |- + The Azure environment + "azure_login_app_id": + "description": |- + The Azure login app ID + "azure_tenant_id": + "description": |- + The Azure tenant ID + "azure_use_msi": + "description": |- + Whether to use MSI for Azure + "azure_workspace_resource_id": + "description": |- + The Azure workspace resource ID + "client_id": + "description": |- + The client ID for the workspace + "file_path": + "description": |- + The file path to use within the workspace for both deployments and workflow runs + "google_service_account": + "description": |- + The Google service account name + "host": + "description": |- + The Databricks workspace host URL + "profile": + "description": |- + The Databricks workspace profile name + "resource_path": + "description": |- + The workspace resource path + "root_path": + "description": |- + The Databricks workspace root path + "state_path": + "description": |- + The workspace state path +github.com/databricks/cli/bundle/config/resources.Grant: + "principal": + "description": |- + The name of the principal that will be granted privileges + "privileges": + "description": |- + The privileges to grant to the specified entity +github.com/databricks/cli/bundle/config/resources.Permission: + "group_name": + "description": |- + The name of the group that has the permission set in level. + "level": + "description": |- + The allowed permission for user, group, service principal defined for this permission. + "service_principal_name": + "description": |- + The name of the service principal that has the permission set in level. + "user_name": + "description": |- + The name of the user that has the permission set in level. 
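
For reference, the Grant and Permission fields annotated above surface in bundle configuration roughly as follows. This is an illustrative sketch only; the permission levels and privilege names are real values, but the user name, group name, and schema resource are hypothetical placeholders rather than anything taken from this patch:

permissions:
  - level: CAN_MANAGE
    user_name: someone@example.com   # hypothetical user
  - level: CAN_VIEW
    group_name: data-readers         # hypothetical group
resources:
  schemas:
    reporting:                       # hypothetical resource key
      catalog_name: main
      name: reporting
      grants:
        - principal: data-readers
          privileges:
            - USE_SCHEMA
            - SELECT
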
+github.com/databricks/cli/bundle/config/variable.Lookup:
+  "alert":
+    "description": |-
+      PLACEHOLDER
+  "cluster":
+    "description": |-
+      PLACEHOLDER
+  "cluster_policy":
+    "description": |-
+      PLACEHOLDER
+  "dashboard":
+    "description": |-
+      PLACEHOLDER
+  "instance_pool":
+    "description": |-
+      PLACEHOLDER
+  "job":
+    "description": |-
+      PLACEHOLDER
+  "metastore":
+    "description": |-
+      PLACEHOLDER
+  "notification_destination":
+    "description": |-
+      PLACEHOLDER
+  "pipeline":
+    "description": |-
+      PLACEHOLDER
+  "query":
+    "description": |-
+      PLACEHOLDER
+  "service_principal":
+    "description": |-
+      PLACEHOLDER
+  "warehouse":
+    "description": |-
+      PLACEHOLDER
+github.com/databricks/cli/bundle/config/variable.TargetVariable:
+  "default":
+    "description": |-
+      PLACEHOLDER
+  "description":
+    "description": |-
+      The description of the variable.
+  "lookup":
+    "description": |-
+      The name of the alert, cluster_policy, cluster, dashboard, instance_pool, job, metastore, pipeline, query, service_principal, or warehouse object for which to retrieve an ID.
+  "markdown_description":
+    "description": |-
+      The markdown description of the variable.
+  "type":
+    "description": |-
+      The type of the variable.
+github.com/databricks/cli/bundle/config/variable.Variable:
+  "default":
+    "description": |-
+      PLACEHOLDER
+  "description":
+    "description": |-
+      The description of the variable.
+  "lookup":
+    "description": |-
+      The name of the alert, cluster_policy, cluster, dashboard, instance_pool, job, metastore, pipeline, query, service_principal, or warehouse object for which to retrieve an ID.
+    "markdown_description": |-
+      The name of the `alert`, `cluster_policy`, `cluster`, `dashboard`, `instance_pool`, `job`, `metastore`, `pipeline`, `query`, `service_principal`, or `warehouse` object for which to retrieve an ID.
+  "type":
+    "description": |-
+      The type of the variable.
diff --git a/bundle/internal/schema/annotations_openapi.yml b/bundle/internal/schema/annotations_openapi.yml
new file mode 100644
index 000000000..e9c893c87
--- /dev/null
+++ b/bundle/internal/schema/annotations_openapi.yml
@@ -0,0 +1,2924 @@
+# This file is auto-generated. DO NOT EDIT.
+github.com/databricks/cli/bundle/config/resources.Cluster:
+  "apply_policy_default_values":
+    "description": |-
+      When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied.
+  "autoscale":
+    "description": |-
+      Parameters needed in order to automatically scale clusters up and down based on load.
+      Note: autoscaling works best with DB runtime versions 3.0 or later.
+  "autotermination_minutes":
+    "description": |-
+      Automatically terminates the cluster after it is inactive for this time in minutes. If not set,
+      this cluster will not be automatically terminated. If specified, the threshold must be between
+      10 and 10000 minutes.
+      Users can also set this value to 0 to explicitly disable automatic termination.
+  "aws_attributes":
+    "description": |-
+      Attributes related to clusters running on Amazon Web Services.
+      If not specified at cluster creation, a set of default values will be used.
+  "azure_attributes":
+    "description": |-
+      Attributes related to clusters running on Microsoft Azure.
+      If not specified at cluster creation, a set of default values will be used.
+  "cluster_log_conf":
+    "description": |-
+      The configuration for delivering spark logs to a long-term storage destination.
+      Two kinds of destinations (dbfs and s3) are supported.
Only one destination can be specified + for one cluster. If the conf is given, the logs will be delivered to the destination every + `5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while + the destination of executor logs is `$destination/$clusterId/executor`. + "cluster_name": + "description": | + Cluster name requested by the user. This doesn't have to be unique. + If not specified at creation, the cluster name will be an empty string. + "custom_tags": + "description": |- + Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS + instances and EBS volumes) with these tags in addition to `default_tags`. Notes: + + - Currently, Databricks allows at most 45 custom tags + + - Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags + "data_security_mode": {} + "docker_image": {} + "driver_instance_pool_id": + "description": |- + The optional ID of the instance pool for the driver of the cluster belongs. + The pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not + assigned. + "driver_node_type_id": + "description": | + The node type of the Spark driver. Note that this field is optional; + if unset, the driver node type will be set as the same value + as `node_type_id` defined above. + "enable_elastic_disk": + "description": |- + Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk + space when its Spark workers are running low on disk space. This feature requires specific AWS + permissions to function correctly - refer to the User Guide for more details. + "enable_local_disk_encryption": + "description": |- + Whether to enable LUKS on cluster VMs' local disks + "gcp_attributes": + "description": |- + Attributes related to clusters running on Google Cloud Platform. + If not specified at cluster creation, a set of default values will be used. + "init_scripts": + "description": |- + The configuration for storing init scripts. Any number of destinations can be specified. The scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified, init script logs are sent to `//init_scripts`. + "instance_pool_id": + "description": |- + The optional ID of the instance pool to which the cluster belongs. + "is_single_node": + "description": | + This field can only be used with `kind`. + + When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers` + "kind": {} + "node_type_id": + "description": | + This field encodes, through a single value, the resources available to each of + the Spark nodes in this cluster. For example, the Spark nodes can be provisioned + and optimized for memory or compute intensive workloads. A list of available node + types can be retrieved by using the :method:clusters/listNodeTypes API call. + "num_workers": + "description": |- + Number of worker nodes that this cluster should have. A cluster has one Spark Driver + and `num_workers` Executors for a total of `num_workers` + 1 Spark nodes. + + Note: When reading the properties of a cluster, this field reflects the desired number + of workers rather than the actual current number of workers. For instance, if a cluster + is resized from 5 to 10 workers, this field will immediately be updated to reflect + the target size of 10 workers, whereas the workers listed in `spark_info` will gradually + increase from 5 to 10 as the new nodes are provisioned. 
+ "policy_id": + "description": |- + The ID of the cluster policy used to create the cluster if applicable. + "runtime_engine": {} + "single_user_name": + "description": |- + Single user name if data_security_mode is `SINGLE_USER` + "spark_conf": + "description": | + An object containing a set of optional, user-specified Spark configuration key-value pairs. + Users can also pass in a string of extra JVM options to the driver and the executors via + `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively. + "spark_env_vars": + "description": |- + An object containing a set of optional, user-specified environment variable key-value pairs. + Please note that key-value pair of the form (X,Y) will be exported as is (i.e., + `export X='Y'`) while launching the driver and workers. + + In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending + them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all + default databricks managed environmental variables are included as well. + + Example Spark environment variables: + `{"SPARK_WORKER_MEMORY": "28000m", "SPARK_LOCAL_DIRS": "/local_disk0"}` or + `{"SPARK_DAEMON_JAVA_OPTS": "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}` + "spark_version": + "description": | + The Spark version of the cluster, e.g. `3.3.x-scala2.11`. + A list of available Spark versions can be retrieved by using + the :method:clusters/sparkVersions API call. + "ssh_public_keys": + "description": |- + SSH public key contents that will be added to each Spark node in this cluster. The + corresponding private keys can be used to login with the user name `ubuntu` on port `2200`. + Up to 10 keys can be specified. + "use_ml_runtime": + "description": | + This field can only be used with `kind`. + + `effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not. + "workload_type": {} +github.com/databricks/cli/bundle/config/resources.Dashboard: + "create_time": + "description": |- + The timestamp of when the dashboard was created. + "dashboard_id": + "description": |- + UUID identifying the dashboard. + "display_name": + "description": |- + The display name of the dashboard. + "etag": + "description": |- + The etag for the dashboard. Can be optionally provided on updates to ensure that the dashboard + has not been modified since the last read. + This field is excluded in List Dashboards responses. + "lifecycle_state": + "description": |- + The state of the dashboard resource. Used for tracking trashed status. + "parent_path": + "description": |- + The workspace path of the folder containing the dashboard. Includes leading slash and no + trailing slash. + This field is excluded in List Dashboards responses. + "path": + "description": |- + The workspace path of the dashboard asset, including the file name. + Exported dashboards always have the file extension `.lvdash.json`. + This field is excluded in List Dashboards responses. + "serialized_dashboard": + "description": |- + The contents of the dashboard in serialized string form. + This field is excluded in List Dashboards responses. + Use the [get dashboard API](https://docs.databricks.com/api/workspace/lakeview/get) + to retrieve an example response, which includes the `serialized_dashboard` field. + This field provides the structure of the JSON string that represents the dashboard's + layout and components. 
+ "update_time": + "description": |- + The timestamp of when the dashboard was last updated by the user. + This field is excluded in List Dashboards responses. + "warehouse_id": + "description": |- + The warehouse ID used to run the dashboard. +github.com/databricks/cli/bundle/config/resources.Job: + "budget_policy_id": + "description": |- + The id of the user specified budget policy to use for this job. + If not specified, a default budget policy may be applied when creating or modifying the job. + See `effective_budget_policy_id` for the budget policy used by this workload. + "continuous": + "description": |- + An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used. + "deployment": + "description": |- + Deployment information for jobs managed by external sources. + "description": + "description": |- + An optional description for the job. The maximum length is 27700 characters in UTF-8 encoding. + "edit_mode": + "description": |- + Edit mode of the job. + + * `UI_LOCKED`: The job is in a locked UI state and cannot be modified. + * `EDITABLE`: The job is in an editable state and can be modified. + "email_notifications": + "description": |- + An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted. + "environments": + "description": |- + A list of task execution environment specifications that can be referenced by serverless tasks of this job. + An environment is required to be present for serverless tasks. + For serverless notebook tasks, the environment is accessible in the notebook environment panel. + For other serverless tasks, the task environment is required to be specified using environment_key in the task settings. + "format": + "description": |- + Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1 this value is always set to `"MULTI_TASK"`. + "git_source": + "description": |- + An optional specification for a remote Git repository containing the source code used by tasks. Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks. + + If `git_source` is set, these tasks retrieve the file from the remote repository by default. However, this behavior can be overridden by setting `source` to `WORKSPACE` on the task. + + Note: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are used, `git_source` must be defined on the job. + "health": {} + "job_clusters": + "description": |- + A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. + "max_concurrent_runs": + "description": |- + An optional maximum allowed number of concurrent runs of the job. + Set this value if you want to be able to execute multiple runs of the same job concurrently. + This is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters. + This setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs. 
+ However, from then on, new runs are skipped unless there are fewer than 3 active runs. + This value cannot exceed 1000. Setting this value to `0` causes all new runs to be skipped. + "name": + "description": |- + An optional name for the job. The maximum length is 4096 bytes in UTF-8 encoding. + "notification_settings": + "description": |- + Optional notification settings that are used when sending notifications to each of the `email_notifications` and `webhook_notifications` for this job. + "parameters": + "description": |- + Job-level parameter definitions + "queue": + "description": |- + The queue settings of the job. + "run_as": {} + "schedule": + "description": |- + An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`. + "tags": + "description": |- + A map of tags associated with the job. These are forwarded to the cluster as cluster tags for jobs clusters, and are subject to the same limitations as cluster tags. A maximum of 25 tags can be added to the job. + "tasks": + "description": |- + A list of task specifications to be executed by this job. + "timeout_seconds": + "description": |- + An optional timeout applied to each run of this job. A value of `0` means no timeout. + "trigger": + "description": |- + A configuration to trigger a run when certain conditions are met. The default behavior is that the job runs only when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`. + "webhook_notifications": + "description": |- + A collection of system notification IDs to notify when runs of this job begin or complete. +github.com/databricks/cli/bundle/config/resources.MlflowExperiment: + "artifact_location": + "description": |- + Location where artifacts for the experiment are stored. + "creation_time": + "description": |- + Creation time + "experiment_id": + "description": |- + Unique identifier for the experiment. + "last_update_time": + "description": |- + Last update time + "lifecycle_stage": + "description": |- + Current life cycle stage of the experiment: "active" or "deleted". + Deleted experiments are not returned by APIs. + "name": + "description": |- + Human readable name that identifies the experiment. + "tags": + "description": |- + Tags: Additional metadata key-value pairs. +github.com/databricks/cli/bundle/config/resources.MlflowModel: + "creation_timestamp": + "description": |- + Timestamp recorded when this `registered_model` was created. + "description": + "description": |- + Description of this `registered_model`. + "last_updated_timestamp": + "description": |- + Timestamp recorded when metadata for this `registered_model` was last updated. + "latest_versions": + "description": |- + Collection of latest model versions for each stage. + Only contains models with current `READY` status. + "name": + "description": |- + Unique name for the model. + "tags": + "description": |- + Tags: Additional metadata key-value pairs for this `registered_model`. + "user_id": + "description": |- + User that created this `registered_model` +github.com/databricks/cli/bundle/config/resources.ModelServingEndpoint: + "ai_gateway": + "description": |- + The AI Gateway configuration for the serving endpoint. NOTE: only external model endpoints are supported as of now. + "config": + "description": |- + The core config of the serving endpoint. + "name": + "description": | + The name of the serving endpoint. 
This field is required and must be unique across a Databricks workspace. + An endpoint name can consist of alphanumeric characters, dashes, and underscores. + "rate_limits": + "description": |- + Rate limits to be applied to the serving endpoint. NOTE: this field is deprecated, please use AI Gateway to manage rate limits. + "route_optimized": + "description": |- + Enable route optimization for the serving endpoint. + "tags": + "description": |- + Tags to be attached to the serving endpoint and automatically propagated to billing logs. +github.com/databricks/cli/bundle/config/resources.Pipeline: + "budget_policy_id": + "description": |- + Budget policy of this pipeline. + "catalog": + "description": |- + A catalog in Unity Catalog to publish data from this pipeline to. If `target` is specified, tables in this pipeline are published to a `target` schema inside `catalog` (for example, `catalog`.`target`.`table`). If `target` is not specified, no data is published to Unity Catalog. + "channel": + "description": |- + DLT Release Channel that specifies which version to use. + "clusters": + "description": |- + Cluster settings for this pipeline deployment. + "configuration": + "description": |- + String-String configuration for this pipeline execution. + "continuous": + "description": |- + Whether the pipeline is continuous or triggered. This replaces `trigger`. + "deployment": + "description": |- + Deployment type of this pipeline. + "development": + "description": |- + Whether the pipeline is in Development mode. Defaults to false. + "edition": + "description": |- + Pipeline product edition. + "filters": + "description": |- + Filters on which Pipeline packages to include in the deployed graph. + "gateway_definition": + "description": |- + The definition of a gateway pipeline to support change data capture. + "id": + "description": |- + Unique identifier for this pipeline. + "ingestion_definition": + "description": |- + The configuration for a managed ingestion pipeline. These settings cannot be used with the 'libraries', 'target' or 'catalog' settings. + "libraries": + "description": |- + Libraries or code needed by this deployment. + "name": + "description": |- + Friendly identifier for this pipeline. + "notifications": + "description": |- + List of notification settings for this pipeline. + "photon": + "description": |- + Whether Photon is enabled for this pipeline. + "restart_window": + "description": |- + Restart window of this pipeline. + "schema": + "description": |- + The default schema (database) where tables are read from or published to. The presence of this field implies that the pipeline is in direct publishing mode. + "serverless": + "description": |- + Whether serverless compute is enabled for this pipeline. + "storage": + "description": |- + DBFS root directory for storing checkpoints and tables. + "target": + "description": |- + Target schema (database) to add tables in this pipeline to. If not specified, no data is published to the Hive metastore or Unity Catalog. To publish to Unity Catalog, also specify `catalog`. + "trigger": + "description": |- + Which pipeline trigger to use. Deprecated: Use `continuous` instead. +github.com/databricks/cli/bundle/config/resources.QualityMonitor: + "assets_dir": + "description": |- + The directory to store monitoring assets (e.g. dashboard, metric tables). + "baseline_table_name": + "description": | + Name of the baseline table from which drift metrics are computed from. 
+ Columns in the monitored table should also be present in the baseline table. + "custom_metrics": + "description": | + Custom metrics to compute on the monitored table. These can be aggregate metrics, derived + metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time + windows). + "data_classification_config": + "description": |- + The data classification config for the monitor. + "inference_log": + "description": |- + Configuration for monitoring inference logs. + "notifications": + "description": |- + The notification settings for the monitor. + "output_schema_name": + "description": |- + Schema where output metric tables are created. + "schedule": + "description": |- + The schedule for automatically updating and refreshing metric tables. + "skip_builtin_dashboard": + "description": |- + Whether to skip creating a default dashboard summarizing data quality metrics. + "slicing_exprs": + "description": | + List of column expressions to slice data with for targeted analysis. The data is grouped by + each expression independently, resulting in a separate slice for each predicate and its + complements. For high-cardinality columns, only the top 100 unique values by frequency will + generate slices. + "snapshot": + "description": |- + Configuration for monitoring snapshot tables. + "time_series": + "description": |- + Configuration for monitoring time series tables. + "warehouse_id": + "description": | + Optional argument to specify the warehouse for dashboard creation. If not specified, the first running + warehouse will be used. +github.com/databricks/cli/bundle/config/resources.RegisteredModel: + "catalog_name": + "description": |- + The name of the catalog where the schema and the registered model reside + "comment": + "description": |- + The comment attached to the registered model + "name": + "description": |- + The name of the registered model + "schema_name": + "description": |- + The name of the schema where the registered model resides + "storage_location": + "description": |- + The storage location on the cloud under which model version data files are stored +github.com/databricks/cli/bundle/config/resources.Schema: + "catalog_name": + "description": |- + Name of parent catalog. + "comment": + "description": |- + User-provided free-form text description. + "name": + "description": |- + Name of schema, relative to parent catalog. + "properties": {} + "storage_root": + "description": |- + Storage root URL for managed tables within schema. +github.com/databricks/cli/bundle/config/resources.Volume: + "catalog_name": + "description": |- + The name of the catalog where the schema and the volume are + "comment": + "description": |- + The comment attached to the volume + "name": + "description": |- + The name of the volume + "schema_name": + "description": |- + The name of the schema where the volume is + "storage_location": + "description": |- + The storage location on the cloud + "volume_type": {} +github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedule: + "pause_status": + "description": |- + Read only field that indicates whether a schedule is paused or not. + "quartz_cron_expression": + "description": | + The expression that determines when to run the monitor. See [examples](https://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html). + "timezone_id": + "description": | + The timezone id (e.g., ``"PST"``) in which to evaluate the quartz expression. 
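
The MonitorCronSchedule fields annotated above (quartz_cron_expression, timezone_id) drive the quality monitor's `schedule` block. A minimal sketch, assuming a hypothetical monitor key and output schema, and omitting other required monitor settings for brevity:

resources:
  quality_monitors:
    orders_monitor:                  # hypothetical resource key
      assets_dir: /Workspace/Users/someone@example.com/monitoring
      output_schema_name: main.monitoring
      snapshot: {}                   # monitor the table as snapshots
      schedule:
        quartz_cron_expression: "0 0 12 * * ?"   # Quartz syntax: daily at noon
        timezone_id: UTC
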
+github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedulePauseStatus: + "_": + "description": |- + Read only field that indicates whether a schedule is paused or not. + "enum": + - |- + UNPAUSED + - |- + PAUSED +github.com/databricks/databricks-sdk-go/service/catalog.MonitorDataClassificationConfig: + "enabled": + "description": |- + Whether data classification is enabled. +github.com/databricks/databricks-sdk-go/service/catalog.MonitorDestination: + "email_addresses": + "description": |- + The list of email addresses to send the notification to. A maximum of 5 email addresses is supported. +github.com/databricks/databricks-sdk-go/service/catalog.MonitorInferenceLog: + "granularities": + "description": | + Granularities for aggregating data into time windows based on their timestamp. Currently the following static + granularities are supported: + {``"5 minutes"``, ``"30 minutes"``, ``"1 hour"``, ``"1 day"``, ``" week(s)"``, ``"1 month"``, ``"1 year"``}. + "label_col": + "description": |- + Optional column that contains the ground truth for the prediction. + "model_id_col": + "description": | + Column that contains the id of the model generating the predictions. Metrics will be computed per model id by + default, and also across all model ids. + "prediction_col": + "description": |- + Column that contains the output/prediction from the model. + "prediction_proba_col": + "description": | + Optional column that contains the prediction probabilities for each class in a classification problem type. + The values in this column should be a map, mapping each class label to the prediction probability for a given + sample. The map should be of PySpark MapType(). + "problem_type": + "description": |- + Problem type the model aims to solve. Determines the type of model-quality metrics that will be computed. + "timestamp_col": + "description": | + Column that contains the timestamps of requests. The column must be one of the following: + - A ``TimestampType`` column + - A column whose values can be converted to timestamps through the pyspark + ``to_timestamp`` [function](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html). +github.com/databricks/databricks-sdk-go/service/catalog.MonitorInferenceLogProblemType: + "_": + "description": |- + Problem type the model aims to solve. Determines the type of model-quality metrics that will be computed. + "enum": + - |- + PROBLEM_TYPE_CLASSIFICATION + - |- + PROBLEM_TYPE_REGRESSION +github.com/databricks/databricks-sdk-go/service/catalog.MonitorMetric: + "definition": + "description": |- + Jinja template for a SQL expression that specifies how to compute the metric. See [create metric definition](https://docs.databricks.com/en/lakehouse-monitoring/custom-metrics.html#create-definition). + "input_columns": + "description": | + A list of column names in the input table the metric should be computed for. + Can use ``":table"`` to indicate that the metric needs information from multiple columns. + "name": + "description": |- + Name of the metric in the output tables. + "output_data_type": + "description": |- + The output type of the custom metric. + "type": + "description": | + Can only be one of ``"CUSTOM_METRIC_TYPE_AGGREGATE"``, ``"CUSTOM_METRIC_TYPE_DERIVED"``, or ``"CUSTOM_METRIC_TYPE_DRIFT"``. 
+ The ``"CUSTOM_METRIC_TYPE_AGGREGATE"`` and ``"CUSTOM_METRIC_TYPE_DERIVED"`` metrics + are computed on a single table, whereas the ``"CUSTOM_METRIC_TYPE_DRIFT"`` compare metrics across + baseline and input table, or across the two consecutive time windows. + - CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table + - CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics + - CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics +github.com/databricks/databricks-sdk-go/service/catalog.MonitorMetricType: + "_": + "description": | + Can only be one of ``"CUSTOM_METRIC_TYPE_AGGREGATE"``, ``"CUSTOM_METRIC_TYPE_DERIVED"``, or ``"CUSTOM_METRIC_TYPE_DRIFT"``. + The ``"CUSTOM_METRIC_TYPE_AGGREGATE"`` and ``"CUSTOM_METRIC_TYPE_DERIVED"`` metrics + are computed on a single table, whereas the ``"CUSTOM_METRIC_TYPE_DRIFT"`` compare metrics across + baseline and input table, or across the two consecutive time windows. + - CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table + - CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics + - CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics + "enum": + - |- + CUSTOM_METRIC_TYPE_AGGREGATE + - |- + CUSTOM_METRIC_TYPE_DERIVED + - |- + CUSTOM_METRIC_TYPE_DRIFT +github.com/databricks/databricks-sdk-go/service/catalog.MonitorNotifications: + "on_failure": + "description": |- + Who to send notifications to on monitor failure. + "on_new_classification_tag_detected": + "description": |- + Who to send notifications to when new data classification tags are detected. +github.com/databricks/databricks-sdk-go/service/catalog.MonitorSnapshot: {} +github.com/databricks/databricks-sdk-go/service/catalog.MonitorTimeSeries: + "granularities": + "description": | + Granularities for aggregating data into time windows based on their timestamp. Currently the following static + granularities are supported: + {``"5 minutes"``, ``"30 minutes"``, ``"1 hour"``, ``"1 day"``, ``" week(s)"``, ``"1 month"``, ``"1 year"``}. + "timestamp_col": + "description": | + Column that contains the timestamps of requests. The column must be one of the following: + - A ``TimestampType`` column + - A column whose values can be converted to timestamps through the pyspark + ``to_timestamp`` [function](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html). +github.com/databricks/databricks-sdk-go/service/catalog.VolumeType: + "_": + "enum": + - |- + EXTERNAL + - |- + MANAGED +github.com/databricks/databricks-sdk-go/service/compute.Adlsgen2Info: + "destination": + "description": |- + abfss destination, e.g. `abfss://@.dfs.core.windows.net/`. +github.com/databricks/databricks-sdk-go/service/compute.AutoScale: + "max_workers": + "description": |- + The maximum number of workers to which the cluster can scale up when overloaded. + Note that `max_workers` must be strictly greater than `min_workers`. + "min_workers": + "description": |- + The minimum number of workers to which the cluster can scale down when underutilized. + It is also the initial number of workers the cluster will have after creation. +github.com/databricks/databricks-sdk-go/service/compute.AwsAttributes: + "availability": {} + "ebs_volume_count": + "description": |- + The number of volumes launched for each instance. Users can choose up to 10 volumes. + This feature is only enabled for supported node types. 
+      Legacy node types cannot specify
+      custom EBS volumes.
+      For node types with no instance store, at least one EBS volume needs to be specified;
+      otherwise, cluster creation will fail.
+
+      These EBS volumes will be mounted at `/ebs0`, `/ebs1`, etc.
+      Instance store volumes will be mounted at `/local_disk0`, `/local_disk1`, etc.
+
+      If EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for
+      scratch storage because heterogeneously sized scratch devices can lead to inefficient disk
+      utilization. If no EBS volumes are attached, Databricks will configure Spark to use instance
+      store volumes.
+
+      Please note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`
+      will be overridden.
+  "ebs_volume_iops":
+    "description": |-
+      If using gp3 volumes, what IOPS to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used.
+  "ebs_volume_size":
+    "description": |-
+      The size of each EBS volume (in GiB) launched for each instance. For general purpose
+      SSD, this value must be within the range 100 - 4096. For throughput optimized HDD,
+      this value must be within the range 500 - 4096.
+  "ebs_volume_throughput":
+    "description": |-
+      If using gp3 volumes, what throughput to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used.
+  "ebs_volume_type": {}
+  "first_on_demand":
+    "description": |-
+      The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.
+      If this value is greater than 0, the cluster driver node in particular will be placed on an
+      on-demand instance. If this value is greater than or equal to the current cluster size, all
+      nodes will be placed on on-demand instances. If this value is less than the current cluster
+      size, `first_on_demand` nodes will be placed on on-demand instances and the remainder will
+      be placed on `availability` instances. Note that this value does not affect
+      cluster size and cannot currently be mutated over the lifetime of a cluster.
+  "instance_profile_arn":
+    "description": |-
+      Nodes for this cluster will only be placed on AWS instances with this instance profile. If
+      omitted, nodes will be placed on instances without an IAM instance profile. The instance
+      profile must have previously been added to the Databricks environment by an account
+      administrator.
+
+      This feature may only be available to certain customer plans.
+
+      If this field is omitted, we will pull in the default from the conf if it exists.
+  "spot_bid_price_percent":
+    "description": |-
+      The bid price for AWS spot instances, as a percentage of the corresponding instance type's
+      on-demand price.
+      For example, if this field is set to 50, and the cluster needs a new `r3.xlarge` spot
+      instance, then the bid price is half of the price of
+      on-demand `r3.xlarge` instances. Similarly, if this field is set to 200, the bid price is twice
+      the price of on-demand `r3.xlarge` instances. If not specified, the default value is 100.
+      When spot instances are requested for this cluster, only spot instances whose bid price
+      percentage matches this field will be considered.
+      Note that, for safety, we enforce this field to be no more than 10000.
+
+      The default value and documentation here should be kept consistent with
+      CommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent.
+ "zone_id": + "description": |- + Identifier for the availability zone/datacenter in which the cluster resides. + This string will be of a form like "us-west-2a". The provided availability + zone must be in the same region as the Databricks deployment. For example, "us-west-2a" + is not a valid zone id if the Databricks deployment resides in the "us-east-1" region. + This is an optional field at cluster creation, and if not specified, a default zone will be used. + If the zone specified is "auto", will try to place cluster in a zone with high availability, + and will retry placement in a different AZ if there is not enough capacity. + The list of available zones as well as the default value can be found by using the + `List Zones` method. +github.com/databricks/databricks-sdk-go/service/compute.AwsAvailability: + "_": + "description": | + Availability type used for all subsequent nodes past the `first_on_demand` ones. + + Note: If `first_on_demand` is zero, this availability type will be used for the entire cluster. + "enum": + - |- + SPOT + - |- + ON_DEMAND + - |- + SPOT_WITH_FALLBACK +github.com/databricks/databricks-sdk-go/service/compute.AzureAttributes: + "availability": {} + "first_on_demand": + "description": |- + The first `first_on_demand` nodes of the cluster will be placed on on-demand instances. + This value should be greater than 0, to make sure the cluster driver node is placed on an + on-demand instance. If this value is greater than or equal to the current cluster size, all + nodes will be placed on on-demand instances. If this value is less than the current cluster + size, `first_on_demand` nodes will be placed on on-demand instances and the remainder will + be placed on `availability` instances. Note that this value does not affect + cluster size and cannot currently be mutated over the lifetime of a cluster. + "log_analytics_info": + "description": |- + Defines values necessary to configure and run Azure Log Analytics agent + "spot_bid_max_price": + "description": |- + The max bid price to be used for Azure spot instances. + The Max price for the bid cannot be higher than the on-demand price of the instance. + If not specified, the default value is -1, which specifies that the instance cannot be evicted + on the basis of price, and only on the basis of availability. Further, the value should > 0 or -1. +github.com/databricks/databricks-sdk-go/service/compute.AzureAvailability: + "_": + "description": |- + Availability type used for all subsequent nodes past the `first_on_demand` ones. + Note: If `first_on_demand` is zero (which only happens on pool clusters), this availability + type will be used for the entire cluster. + "enum": + - |- + SPOT_AZURE + - |- + ON_DEMAND_AZURE + - |- + SPOT_WITH_FALLBACK_AZURE +github.com/databricks/databricks-sdk-go/service/compute.ClientsTypes: + "jobs": + "description": |- + With jobs set, the cluster can be used for jobs + "notebooks": + "description": |- + With notebooks set, this cluster can be used for notebooks +github.com/databricks/databricks-sdk-go/service/compute.ClusterLogConf: + "dbfs": + "description": |- + destination needs to be provided. e.g. + `{ "dbfs" : { "destination" : "dbfs:/home/cluster_log" } }` + "s3": + "description": |- + destination and either the region or endpoint need to be provided. e.g. 
+ `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" : "us-west-2" } }` + Cluster iam role is used to access s3, please make sure the cluster iam role in + `instance_profile_arn` has permission to write data to the s3 destination. +github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec: + "apply_policy_default_values": + "description": |- + When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied. + "autoscale": + "description": |- + Parameters needed in order to automatically scale clusters up and down based on load. + Note: autoscaling works best with DB runtime versions 3.0 or later. + "autotermination_minutes": + "description": |- + Automatically terminates the cluster after it is inactive for this time in minutes. If not set, + this cluster will not be automatically terminated. If specified, the threshold must be between + 10 and 10000 minutes. + Users can also set this value to 0 to explicitly disable automatic termination. + "aws_attributes": + "description": |- + Attributes related to clusters running on Amazon Web Services. + If not specified at cluster creation, a set of default values will be used. + "azure_attributes": + "description": |- + Attributes related to clusters running on Microsoft Azure. + If not specified at cluster creation, a set of default values will be used. + "cluster_log_conf": + "description": |- + The configuration for delivering spark logs to a long-term storage destination. + Two kinds of destinations (dbfs and s3) are supported. Only one destination can be specified + for one cluster. If the conf is given, the logs will be delivered to the destination every + `5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while + the destination of executor logs is `$destination/$clusterId/executor`. + "cluster_name": + "description": | + Cluster name requested by the user. This doesn't have to be unique. + If not specified at creation, the cluster name will be an empty string. + "custom_tags": + "description": |- + Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS + instances and EBS volumes) with these tags in addition to `default_tags`. Notes: + + - Currently, Databricks allows at most 45 custom tags + + - Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags + "data_security_mode": {} + "docker_image": {} + "driver_instance_pool_id": + "description": |- + The optional ID of the instance pool for the driver of the cluster belongs. + The pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not + assigned. + "driver_node_type_id": + "description": | + The node type of the Spark driver. Note that this field is optional; + if unset, the driver node type will be set as the same value + as `node_type_id` defined above. + "enable_elastic_disk": + "description": |- + Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk + space when its Spark workers are running low on disk space. This feature requires specific AWS + permissions to function correctly - refer to the User Guide for more details. + "enable_local_disk_encryption": + "description": |- + Whether to enable LUKS on cluster VMs' local disks + "gcp_attributes": + "description": |- + Attributes related to clusters running on Google Cloud Platform. 
+ If not specified at cluster creation, a set of default values will be used. + "init_scripts": + "description": |- + The configuration for storing init scripts. Any number of destinations can be specified. The scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified, init script logs are sent to `//init_scripts`. + "instance_pool_id": + "description": |- + The optional ID of the instance pool to which the cluster belongs. + "is_single_node": + "description": | + This field can only be used with `kind`. + + When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers` + "kind": {} + "node_type_id": + "description": | + This field encodes, through a single value, the resources available to each of + the Spark nodes in this cluster. For example, the Spark nodes can be provisioned + and optimized for memory or compute intensive workloads. A list of available node + types can be retrieved by using the :method:clusters/listNodeTypes API call. + "num_workers": + "description": |- + Number of worker nodes that this cluster should have. A cluster has one Spark Driver + and `num_workers` Executors for a total of `num_workers` + 1 Spark nodes. + + Note: When reading the properties of a cluster, this field reflects the desired number + of workers rather than the actual current number of workers. For instance, if a cluster + is resized from 5 to 10 workers, this field will immediately be updated to reflect + the target size of 10 workers, whereas the workers listed in `spark_info` will gradually + increase from 5 to 10 as the new nodes are provisioned. + "policy_id": + "description": |- + The ID of the cluster policy used to create the cluster if applicable. + "runtime_engine": {} + "single_user_name": + "description": |- + Single user name if data_security_mode is `SINGLE_USER` + "spark_conf": + "description": | + An object containing a set of optional, user-specified Spark configuration key-value pairs. + Users can also pass in a string of extra JVM options to the driver and the executors via + `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively. + "spark_env_vars": + "description": |- + An object containing a set of optional, user-specified environment variable key-value pairs. + Please note that key-value pair of the form (X,Y) will be exported as is (i.e., + `export X='Y'`) while launching the driver and workers. + + In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending + them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all + default databricks managed environmental variables are included as well. + + Example Spark environment variables: + `{"SPARK_WORKER_MEMORY": "28000m", "SPARK_LOCAL_DIRS": "/local_disk0"}` or + `{"SPARK_DAEMON_JAVA_OPTS": "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}` + "spark_version": + "description": | + The Spark version of the cluster, e.g. `3.3.x-scala2.11`. + A list of available Spark versions can be retrieved by using + the :method:clusters/sparkVersions API call. + "ssh_public_keys": + "description": |- + SSH public key contents that will be added to each Spark node in this cluster. The + corresponding private keys can be used to login with the user name `ubuntu` on port `2200`. + Up to 10 keys can be specified. + "use_ml_runtime": + "description": | + This field can only be used with `kind`. 
+ + `effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not. + "workload_type": {} +github.com/databricks/databricks-sdk-go/service/compute.DataSecurityMode: + "_": + "description": | + Data security mode decides what data governance model to use when accessing data + from a cluster. + + The following modes can only be used with `kind`. + * `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration. + * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. + * `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`. + + The following modes can be used regardless of `kind`. + * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode. + * `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode. + * `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. But programming languages and cluster features might be limited. + + The following modes are deprecated starting with Databricks Runtime 15.0 and + will be removed for future Databricks Runtime versions: + + * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. + * `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency clusters. + * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters. + * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled. + "enum": + - |- + DATA_SECURITY_MODE_AUTO + - |- + DATA_SECURITY_MODE_STANDARD + - |- + DATA_SECURITY_MODE_DEDICATED + - |- + NONE + - |- + SINGLE_USER + - |- + USER_ISOLATION + - |- + LEGACY_TABLE_ACL + - |- + LEGACY_PASSTHROUGH + - |- + LEGACY_SINGLE_USER + - |- + LEGACY_SINGLE_USER_STANDARD +github.com/databricks/databricks-sdk-go/service/compute.DbfsStorageInfo: + "destination": + "description": |- + dbfs destination, e.g. `dbfs:/my/path` +github.com/databricks/databricks-sdk-go/service/compute.DockerBasicAuth: + "password": + "description": |- + Password of the user + "username": + "description": |- + Name of the user +github.com/databricks/databricks-sdk-go/service/compute.DockerImage: + "basic_auth": {} + "url": + "description": |- + URL of the docker image. +github.com/databricks/databricks-sdk-go/service/compute.EbsVolumeType: + "_": + "description": |- + The type of EBS volumes that will be launched with this cluster. + "enum": + - |- + GENERAL_PURPOSE_SSD + - |- + THROUGHPUT_OPTIMIZED_HDD +github.com/databricks/databricks-sdk-go/service/compute.Environment: + "_": + "description": |- + The environment entity used to preserve serverless environment side panel and jobs' environment for non-notebook task. + In this minimal environment spec, only pip dependencies are supported. + "client": + "description": |- + Client version used by the environment + The client is the user-facing environment of the runtime. + Each client comes with a specific set of pre-installed libraries. + The version is a string, consisting of the major client version. 
+ "dependencies": + "description": |- + List of pip dependencies, as supported by the version of pip in this environment. + Each dependency is a pip requirement file line https://pip.pypa.io/en/stable/reference/requirements-file-format/ + Allowed dependency could be , , (WSFS or Volumes in Databricks), + E.g. dependencies: ["foo==0.0.1", "-r /Workspace/test/requirements.txt"] +github.com/databricks/databricks-sdk-go/service/compute.GcpAttributes: + "availability": {} + "boot_disk_size": + "description": |- + boot disk size in GB + "google_service_account": + "description": |- + If provided, the cluster will impersonate the google service account when accessing + gcloud services (like GCS). The google service account + must have previously been added to the Databricks environment by an account + administrator. + "local_ssd_count": + "description": |- + If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type. + "use_preemptible_executors": + "description": |- + This field determines whether the spark executors will be scheduled to run on preemptible VMs (when set to true) versus standard compute engine VMs (when set to false; default). + Note: Soon to be deprecated, use the availability field instead. + "zone_id": + "description": |- + Identifier for the availability zone in which the cluster resides. + This can be one of the following: + - "HA" => High availability, spread nodes across availability zones for a Databricks deployment region [default] + - "AUTO" => Databricks picks an availability zone to schedule the cluster on. + - A GCP availability zone => Pick One of the available zones for (machine type + region) from https://cloud.google.com/compute/docs/regions-zones. +github.com/databricks/databricks-sdk-go/service/compute.GcpAvailability: + "_": + "description": |- + This field determines whether the instance pool will contain preemptible + VMs, on-demand VMs, or preemptible VMs with a fallback to on-demand VMs if the former is unavailable. + "enum": + - |- + PREEMPTIBLE_GCP + - |- + ON_DEMAND_GCP + - |- + PREEMPTIBLE_WITH_FALLBACK_GCP +github.com/databricks/databricks-sdk-go/service/compute.GcsStorageInfo: + "destination": + "description": |- + GCS destination/URI, e.g. `gs://my-bucket/some-prefix` +github.com/databricks/databricks-sdk-go/service/compute.InitScriptInfo: + "abfss": + "description": |- + destination needs to be provided. e.g. + `{ "abfss" : { "destination" : "abfss://@.dfs.core.windows.net/" } } + "dbfs": + "description": |- + destination needs to be provided. e.g. + `{ "dbfs" : { "destination" : "dbfs:/home/cluster_log" } }` + "file": + "description": |- + destination needs to be provided. e.g. + `{ "file" : { "destination" : "file:/my/local/file.sh" } }` + "gcs": + "description": |- + destination needs to be provided. e.g. + `{ "gcs": { "destination": "gs://my-bucket/file.sh" } }` + "s3": + "description": |- + destination and either the region or endpoint need to be provided. e.g. + `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" : "us-west-2" } }` + Cluster iam role is used to access s3, please make sure the cluster iam role in + `instance_profile_arn` has permission to write data to the s3 destination. + "volumes": + "description": |- + destination needs to be provided. e.g. 
+ `{ "volumes" : { "destination" : "/Volumes/my-init.sh" } }` + "workspace": + "description": |- + destination needs to be provided. e.g. + `{ "workspace" : { "destination" : "/Users/user1@databricks.com/my-init.sh" } }` +github.com/databricks/databricks-sdk-go/service/compute.Library: + "cran": + "description": |- + Specification of a CRAN library to be installed as part of the library + "egg": + "description": |- + Deprecated. URI of the egg library to install. Installing Python egg files is deprecated and is not supported in Databricks Runtime 14.0 and above. + "jar": + "description": |- + URI of the JAR library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs. + For example: `{ "jar": "/Workspace/path/to/library.jar" }`, `{ "jar" : "/Volumes/path/to/library.jar" }` or + `{ "jar": "s3://my-bucket/library.jar" }`. + If S3 is used, please make sure the cluster has read access on the library. You may need to + launch the cluster with an IAM role to access the S3 URI. + "maven": + "description": |- + Specification of a maven library to be installed. For example: + `{ "coordinates": "org.jsoup:jsoup:1.7.2" }` + "pypi": + "description": |- + Specification of a PyPi library to be installed. For example: + `{ "package": "simplejson" }` + "requirements": + "description": |- + URI of the requirements.txt file to install. Only Workspace paths and Unity Catalog Volumes paths are supported. + For example: `{ "requirements": "/Workspace/path/to/requirements.txt" }` or `{ "requirements" : "/Volumes/path/to/requirements.txt" }` + "whl": + "description": |- + URI of the wheel library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs. + For example: `{ "whl": "/Workspace/path/to/library.whl" }`, `{ "whl" : "/Volumes/path/to/library.whl" }` or + `{ "whl": "s3://my-bucket/library.whl" }`. + If S3 is used, please make sure the cluster has read access on the library. You may need to + launch the cluster with an IAM role to access the S3 URI. +github.com/databricks/databricks-sdk-go/service/compute.LocalFileInfo: + "destination": + "description": |- + local file destination, e.g. `file:/my/local/file.sh` +github.com/databricks/databricks-sdk-go/service/compute.LogAnalyticsInfo: + "log_analytics_primary_key": + "description": |- + + "log_analytics_workspace_id": + "description": |- + +github.com/databricks/databricks-sdk-go/service/compute.MavenLibrary: + "coordinates": + "description": |- + Gradle-style maven coordinates. For example: "org.jsoup:jsoup:1.7.2". + "exclusions": + "description": |- + List of dependences to exclude. For example: `["slf4j:slf4j", "*:hadoop-client"]`. + + Maven dependency exclusions: + https://maven.apache.org/guides/introduction/introduction-to-optional-and-excludes-dependencies.html. + "repo": + "description": |- + Maven repo to install the Maven package from. If omitted, both Maven Central Repository + and Spark Packages are searched. +github.com/databricks/databricks-sdk-go/service/compute.PythonPyPiLibrary: + "package": + "description": |- + The name of the pypi package to install. An optional exact version specification is also + supported. Examples: "simplejson" and "simplejson==3.8.0". + "repo": + "description": |- + The repository where the package can be found. If not specified, the default pip index is + used. +github.com/databricks/databricks-sdk-go/service/compute.RCranLibrary: + "package": + "description": |- + The name of the CRAN package to install. 
+ "repo": + "description": |- + The repository where the package can be found. If not specified, the default CRAN repo is used. +github.com/databricks/databricks-sdk-go/service/compute.RuntimeEngine: + "_": + "description": | + Determines the cluster's runtime engine, either standard or Photon. + + This field is not compatible with legacy `spark_version` values that contain `-photon-`. + Remove `-photon-` from the `spark_version` and set `runtime_engine` to `PHOTON`. + + If left unspecified, the runtime engine defaults to standard unless the spark_version + contains -photon-, in which case Photon will be used. + "enum": + - |- + NULL + - |- + STANDARD + - |- + PHOTON +github.com/databricks/databricks-sdk-go/service/compute.S3StorageInfo: + "canned_acl": + "description": |- + (Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`. + If `canned_cal` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on + the destination bucket and prefix. The full list of possible canned acl can be found at + http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl. + Please also note that by default only the object owner gets full controls. If you are using cross account + role for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to + read the logs. + "destination": + "description": |- + S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using + cluster iam role, please make sure you set cluster iam role and the role has write access to the + destination. Please also note that you cannot use AWS keys to deliver logs. + "enable_encryption": + "description": |- + (Optional) Flag to enable server side encryption, `false` by default. + "encryption_type": + "description": |- + (Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when + encryption is enabled and the default type is `sse-s3`. + "endpoint": + "description": |- + S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set. + If both are set, endpoint will be used. + "kms_key": + "description": |- + (Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`. + "region": + "description": |- + S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set, + endpoint will be used. +github.com/databricks/databricks-sdk-go/service/compute.VolumesStorageInfo: + "destination": + "description": |- + Unity Catalog Volumes file destination, e.g. `/Volumes/my-init.sh` +github.com/databricks/databricks-sdk-go/service/compute.WorkloadType: + "clients": + "description": |2- + defined what type of clients can use the cluster. E.g. Notebooks, Jobs +github.com/databricks/databricks-sdk-go/service/compute.WorkspaceStorageInfo: + "destination": + "description": |- + workspace files destination, e.g. `/Users/user1@databricks.com/my-init.sh` +github.com/databricks/databricks-sdk-go/service/dashboards.LifecycleState: + "_": + "enum": + - |- + ACTIVE + - |- + TRASHED +github.com/databricks/databricks-sdk-go/service/jobs.CleanRoomsNotebookTask: + "clean_room_name": + "description": |- + The clean room that the notebook belongs to. + "etag": + "description": |- + Checksum to validate the freshness of the notebook resource (i.e. the notebook being run is the latest version). + It can be fetched by calling the :method:cleanroomassets/get API. 
+ "notebook_base_parameters": + "description": |- + Base parameters to be used for the clean room notebook job. + "notebook_name": + "description": |- + Name of the notebook being run. +github.com/databricks/databricks-sdk-go/service/jobs.Condition: + "_": + "enum": + - |- + ANY_UPDATED + - |- + ALL_UPDATED +github.com/databricks/databricks-sdk-go/service/jobs.ConditionTask: + "left": + "description": |- + The left operand of the condition task. Can be either a string value or a job state or parameter reference. + "op": + "description": |- + * `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`. + * `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” >= “12”` will evaluate to `true`, `“10.0” >= “12”` will evaluate to `false`. + + The boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison. + "right": + "description": |- + The right operand of the condition task. Can be either a string value or a job state or parameter reference. +github.com/databricks/databricks-sdk-go/service/jobs.ConditionTaskOp: + "_": + "description": |- + * `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`. + * `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” >= “12”` will evaluate to `true`, `“10.0” >= “12”` will evaluate to `false`. + + The boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison. + "enum": + - |- + EQUAL_TO + - |- + GREATER_THAN + - |- + GREATER_THAN_OR_EQUAL + - |- + LESS_THAN + - |- + LESS_THAN_OR_EQUAL + - |- + NOT_EQUAL +github.com/databricks/databricks-sdk-go/service/jobs.Continuous: + "pause_status": + "description": |- + Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED. +github.com/databricks/databricks-sdk-go/service/jobs.CronSchedule: + "pause_status": + "description": |- + Indicate whether this schedule is paused or not. + "quartz_cron_expression": + "description": |- + A Cron expression using Quartz syntax that describes the schedule for a job. See [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html) for details. This field is required. + "timezone_id": + "description": |- + A Java timezone ID. The schedule for a job is resolved with respect to this timezone. See [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details. This field is required. +github.com/databricks/databricks-sdk-go/service/jobs.DbtTask: + "catalog": + "description": |- + Optional name of the catalog to use. The value is the top level in the 3-level namespace of Unity Catalog (catalog / schema / relation). The catalog value can only be specified if a warehouse_id is specified. Requires dbt-databricks >= 1.1.1. + "commands": + "description": |- + A list of dbt commands to execute. All commands must start with `dbt`. This parameter must not be empty. A maximum of up to 10 commands can be provided. 
+ "profiles_directory": + "description": |- + Optional (relative) path to the profiles directory. Can only be specified if no warehouse_id is specified. If no warehouse_id is specified and this folder is unset, the root directory is used. + "project_directory": + "description": |- + Path to the project directory. Optional for Git sourced tasks, in which + case if no value is provided, the root of the Git repository is used. + "schema": + "description": |- + Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used. + "source": + "description": |- + Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved + from the local Databricks workspace. When set to `GIT`, the project will be retrieved from a Git repository + defined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise. + + * `WORKSPACE`: Project is located in Databricks workspace. + * `GIT`: Project is located in cloud Git provider. + "warehouse_id": + "description": |- + ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument. +github.com/databricks/databricks-sdk-go/service/jobs.FileArrivalTriggerConfiguration: + "min_time_between_triggers_seconds": + "description": |- + If set, the trigger starts a run only after the specified amount of time passed since + the last time the trigger fired. The minimum allowed value is 60 seconds + "url": + "description": |- + URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location. + "wait_after_last_change_seconds": + "description": |- + If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. + This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The + minimum allowed value is 60 seconds. +github.com/databricks/databricks-sdk-go/service/jobs.ForEachTask: + "concurrency": + "description": |- + An optional maximum allowed number of concurrent runs of the task. + Set this value if you want to be able to execute multiple runs of the task concurrently. + "inputs": + "description": |- + Array for task to iterate on. This can be a JSON string or a reference to + an array parameter. + "task": + "description": |- + Configuration for the task that will be run for each element in the array +github.com/databricks/databricks-sdk-go/service/jobs.Format: + "_": + "enum": + - |- + SINGLE_TASK + - |- + MULTI_TASK +github.com/databricks/databricks-sdk-go/service/jobs.GitProvider: + "_": + "enum": + - |- + gitHub + - |- + bitbucketCloud + - |- + azureDevOpsServices + - |- + gitHubEnterprise + - |- + bitbucketServer + - |- + gitLab + - |- + gitLabEnterpriseEdition + - |- + awsCodeCommit +github.com/databricks/databricks-sdk-go/service/jobs.GitSnapshot: + "_": + "description": |- + Read-only state of the remote repository at the time the job was run. This field is only included on job runs. + "used_commit": + "description": |- + Commit that was used to execute the run. If git_branch was specified, this points to the HEAD of the branch at the time of the run; if git_tag was specified, this points to the commit the tag points to. 
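The dbt task and file-arrival trigger fields annotated above are the same snake_case keys a bundle job resource accepts. As a rough sketch only (the resource name, storage URL, and warehouse ID are invented placeholders, and the authoritative key set is whatever the generated JSON schema allows), a triggered dbt job might look like this:

```yaml
# Hypothetical databricks.yml fragment; all names, URLs, and IDs are placeholders.
resources:
  jobs:
    dbt_nightly:
      name: dbt-nightly
      trigger:
        pause_status: UNPAUSED
        file_arrival:
          # Must point to the root or a subpath of an external location.
          url: s3://example-bucket/landing/
          min_time_between_triggers_seconds: 60
      tasks:
        - task_key: dbt_run
          dbt_task:
            source: WORKSPACE
            commands:
              - dbt deps
              - dbt run
            # SQL warehouse used to generate the profile and connection details.
            warehouse_id: "0123456789abcdef"
```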
+github.com/databricks/databricks-sdk-go/service/jobs.GitSource: + "_": + "description": |- + An optional specification for a remote Git repository containing the source code used by tasks. Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks. + + If `git_source` is set, these tasks retrieve the file from the remote repository by default. However, this behavior can be overridden by setting `source` to `WORKSPACE` on the task. + + Note: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are used, `git_source` must be defined on the job. + "git_branch": + "description": |- + Name of the branch to be checked out and used by this job. This field cannot be specified in conjunction with git_tag or git_commit. + "git_commit": + "description": |- + Commit to be checked out and used by this job. This field cannot be specified in conjunction with git_branch or git_tag. + "git_provider": + "description": |- + Unique identifier of the service used to host the Git repository. The value is case insensitive. + "git_snapshot": {} + "git_tag": + "description": |- + Name of the tag to be checked out and used by this job. This field cannot be specified in conjunction with git_branch or git_commit. + "git_url": + "description": |- + URL of the repository to be cloned by this job. + "job_source": + "description": |- + The source of the job specification in the remote repository when the job is source controlled. +github.com/databricks/databricks-sdk-go/service/jobs.JobCluster: + "job_cluster_key": + "description": |- + A unique name for the job cluster. This field is required and must be unique within the job. + `JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution. + "new_cluster": + "description": |- + If new_cluster, a description of a cluster that is created for each task. +github.com/databricks/databricks-sdk-go/service/jobs.JobDeployment: + "kind": + "description": |- + The kind of deployment that manages the job. + + * `BUNDLE`: The job is managed by Databricks Asset Bundle. + "metadata_file_path": + "description": |- + Path of the file that contains deployment metadata. +github.com/databricks/databricks-sdk-go/service/jobs.JobDeploymentKind: + "_": + "description": |- + * `BUNDLE`: The job is managed by Databricks Asset Bundle. + "enum": + - |- + BUNDLE +github.com/databricks/databricks-sdk-go/service/jobs.JobEditMode: + "_": + "description": |- + Edit mode of the job. + + * `UI_LOCKED`: The job is in a locked UI state and cannot be modified. + * `EDITABLE`: The job is in an editable state and can be modified. + "enum": + - |- + UI_LOCKED + - |- + EDITABLE +github.com/databricks/databricks-sdk-go/service/jobs.JobEmailNotifications: + "no_alert_for_skipped_runs": + "description": |- + If true, do not send email to recipients specified in `on_failure` if the run is skipped. + This field is `deprecated`. Please use the `notification_settings.no_alert_for_skipped_runs` field. + "on_duration_warning_threshold_exceeded": + "description": |- + A list of email addresses to be notified when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is specified in the `health` field for the job, notifications are not sent. + "on_failure": + "description": |- + A list of email addresses to be notified when a run unsuccessfully completes. 
A run is considered to have completed unsuccessfully if it ends with an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED`, or `TIMED_OUT` result_state. If this is not specified on job creation, reset, or update the list is empty, and notifications are not sent. + "on_start": + "description": |- + A list of email addresses to be notified when a run begins. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent. + "on_streaming_backlog_exceeded": + "description": |- + A list of email addresses to notify when any streaming backlog thresholds are exceeded for any stream. + Streaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`. + Alerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes. + "on_success": + "description": |- + A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent. +github.com/databricks/databricks-sdk-go/service/jobs.JobEnvironment: + "environment_key": + "description": |- + The key of an environment. It has to be unique within a job. + "spec": {} +github.com/databricks/databricks-sdk-go/service/jobs.JobNotificationSettings: + "no_alert_for_canceled_runs": + "description": |- + If true, do not send notifications to recipients specified in `on_failure` if the run is canceled. + "no_alert_for_skipped_runs": + "description": |- + If true, do not send notifications to recipients specified in `on_failure` if the run is skipped. +github.com/databricks/databricks-sdk-go/service/jobs.JobParameterDefinition: + "default": + "description": |- + Default value of the parameter. + "name": + "description": |- + The name of the defined parameter. May only contain alphanumeric characters, `_`, `-`, and `.` +github.com/databricks/databricks-sdk-go/service/jobs.JobRunAs: + "_": + "description": |- + Write-only setting. Specifies the user or service principal that the job runs as. If not specified, the job runs as the user who created the job. + + Either `user_name` or `service_principal_name` should be specified. If not, an error is thrown. + "service_principal_name": + "description": |- + Application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role. + "user_name": + "description": |- + The email of an active workspace user. Non-admin users can only set this field to their own email. +github.com/databricks/databricks-sdk-go/service/jobs.JobSource: + "_": + "description": |- + The source of the job specification in the remote repository when the job is source controlled. + "dirty_state": + "description": |- + Dirty state indicates the job is not fully synced with the job specification in the remote repository. + + Possible values are: + * `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced. + * `DISCONNECTED`: The job is temporary disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from UI to make the job fully synced. 
+ "import_from_git_branch": + "description": |- + Name of the branch which the job is imported from. + "job_config_path": + "description": |- + Path of the job YAML file that contains the job specification. +github.com/databricks/databricks-sdk-go/service/jobs.JobSourceDirtyState: + "_": + "description": |- + Dirty state indicates the job is not fully synced with the job specification + in the remote repository. + + Possible values are: + * `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced. + * `DISCONNECTED`: The job is temporary disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from UI to make the job fully synced. + "enum": + - |- + NOT_SYNCED + - |- + DISCONNECTED +github.com/databricks/databricks-sdk-go/service/jobs.JobsHealthMetric: + "_": + "description": |- + Specifies the health metric that is being evaluated for a particular health rule. + + * `RUN_DURATION_SECONDS`: Expected total time for a run in seconds. + * `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric is in Public Preview. + * `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all streams. This metric is in Public Preview. + * `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This metric is in Public Preview. + * `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all streams. This metric is in Public Preview. + "enum": + - |- + RUN_DURATION_SECONDS + - |- + STREAMING_BACKLOG_BYTES + - |- + STREAMING_BACKLOG_RECORDS + - |- + STREAMING_BACKLOG_SECONDS + - |- + STREAMING_BACKLOG_FILES +github.com/databricks/databricks-sdk-go/service/jobs.JobsHealthOperator: + "_": + "description": |- + Specifies the operator used to compare the health metric value with the specified threshold. + "enum": + - |- + GREATER_THAN +github.com/databricks/databricks-sdk-go/service/jobs.JobsHealthRule: + "metric": {} + "op": {} + "value": + "description": |- + Specifies the threshold value that the health metric should obey to satisfy the health rule. +github.com/databricks/databricks-sdk-go/service/jobs.JobsHealthRules: + "_": + "description": |- + An optional set of health rules that can be defined for this job. + "rules": {} +github.com/databricks/databricks-sdk-go/service/jobs.NotebookTask: + "base_parameters": + "description": |- + Base parameters to be used for each run of this job. If the run is initiated by a call to :method:jobs/run + Now with parameters specified, the two parameters maps are merged. If the same key is specified in + `base_parameters` and in `run-now`, the value from `run-now` is used. + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. + + If the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters, + the default value from the notebook is used. + + Retrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets). + + The JSON representation of this field cannot exceed 1MB. + "notebook_path": + "description": |- + The path of the notebook to be run in the Databricks workspace or remote repository. 
+ For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. + For notebooks stored in a remote repository, the path must be relative. This field is required. + "source": + "description": |- + Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository + defined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise. + * `WORKSPACE`: Notebook is located in Databricks workspace. + * `GIT`: Notebook is located in cloud Git provider. + "warehouse_id": + "description": |- + Optional `warehouse_id` to run the notebook on a SQL warehouse. Classic SQL warehouses are NOT supported, please use serverless or pro SQL warehouses. + + Note that SQL warehouses only support SQL cells; if the notebook contains non-SQL cells, the run will fail. +github.com/databricks/databricks-sdk-go/service/jobs.PauseStatus: + "_": + "enum": + - |- + UNPAUSED + - |- + PAUSED +github.com/databricks/databricks-sdk-go/service/jobs.PeriodicTriggerConfiguration: + "interval": + "description": |- + The interval at which the trigger should run. + "unit": + "description": |- + The unit of time for the interval. +github.com/databricks/databricks-sdk-go/service/jobs.PeriodicTriggerConfigurationTimeUnit: + "_": + "enum": + - |- + HOURS + - |- + DAYS + - |- + WEEKS +github.com/databricks/databricks-sdk-go/service/jobs.PipelineParams: + "full_refresh": + "description": |- + If true, triggers a full refresh on the delta live table. +github.com/databricks/databricks-sdk-go/service/jobs.PipelineTask: + "full_refresh": + "description": |- + If true, triggers a full refresh on the delta live table. + "pipeline_id": + "description": |- + The full name of the pipeline task to execute. +github.com/databricks/databricks-sdk-go/service/jobs.PythonWheelTask: + "entry_point": + "description": |- + Named entry point to use, if it does not exist in the metadata of the package it executes the function from the package directly using `$packageName.$entryPoint()` + "named_parameters": + "description": |- + Command-line parameters passed to Python wheel task in the form of `["--name=task", "--data=dbfs:/path/to/data.json"]`. Leave it empty if `parameters` is not null. + "package_name": + "description": |- + Name of the package to execute + "parameters": + "description": |- + Command-line parameters passed to Python wheel task. Leave it empty if `named_parameters` is not null. +github.com/databricks/databricks-sdk-go/service/jobs.QueueSettings: + "enabled": + "description": |- + If true, enable queueing for the job. This is a required field. +github.com/databricks/databricks-sdk-go/service/jobs.RunIf: + "_": + "description": |- + An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`. 
+
+      Possible values are:
+      * `ALL_SUCCESS`: All dependencies have executed and succeeded
+      * `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded
+      * `NONE_FAILED`: None of the dependencies have failed and at least one was executed
+      * `ALL_DONE`: All dependencies have been completed
+      * `AT_LEAST_ONE_FAILED`: At least one dependency failed
+      * `ALL_FAILED`: All dependencies have failed
+    "enum":
+      - |-
+        ALL_SUCCESS
+      - |-
+        ALL_DONE
+      - |-
+        NONE_FAILED
+      - |-
+        AT_LEAST_ONE_SUCCESS
+      - |-
+        ALL_FAILED
+      - |-
+        AT_LEAST_ONE_FAILED
+github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask:
+  "dbt_commands":
+    "description": |-
+      An array of commands to execute for jobs with the dbt task, for example `"dbt_commands": ["dbt deps", "dbt seed", "dbt run"]`
+  "jar_params":
+    "description": |-
+      A list of parameters for jobs with Spark JAR tasks, for example `"jar_params": ["john doe", "35"]`.
+      The parameters are used to invoke the main function of the main class specified in the Spark JAR task.
+      If not specified upon `run-now`, it defaults to an empty list.
+      jar_params cannot be specified in conjunction with notebook_params.
+      The JSON representation of this field (for example `{"jar_params":["john doe","35"]}`) cannot exceed 10,000 bytes.
+
+      Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.
+  "job_id":
+    "description": |-
+      ID of the job to trigger.
+  "job_parameters":
+    "description": |-
+      Job-level parameters used to trigger the job.
+  "notebook_params":
+    "description": |-
+      A map from keys to values for jobs with notebook task, for example `"notebook_params": {"name": "john doe", "age": "35"}`.
+      The map is passed to the notebook and is accessible through the [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html) function.
+
+      If not specified upon `run-now`, the triggered run uses the job’s base parameters.
+
+      notebook_params cannot be specified in conjunction with jar_params.
+
+      Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.
+
+      The JSON representation of this field (for example `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed 10,000 bytes.
+  "pipeline_params":
+    "description": |-
+      Controls whether the pipeline should perform a full refresh
+  "python_named_params": {}
+  "python_params":
+    "description": |-
+      A list of parameters for jobs with Python tasks, for example `"python_params": ["john doe", "35"]`.
+      The parameters are passed to Python file as command-line parameters. If specified upon `run-now`, it would overwrite
+      the parameters specified in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`)
+      cannot exceed 10,000 bytes.
+
+      Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.
+
+      Important
+
+      These parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.
+      Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.
+  "spark_submit_params":
+    "description": |-
+      A list of parameters for jobs with spark submit task, for example `"spark_submit_params": ["--class", "org.apache.spark.examples.SparkPi"]`.
+ The parameters are passed to spark-submit script as command-line parameters. If specified upon `run-now`, it would overwrite the + parameters specified in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) + cannot exceed 10,000 bytes. + + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs + + Important + + These parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error. + Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis. + "sql_params": + "description": |- + A map from keys to values for jobs with SQL task, for example `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task does not support custom parameters. +github.com/databricks/databricks-sdk-go/service/jobs.Source: + "_": + "description": |- + Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\ + from the local Databricks workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository + defined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise. + + * `WORKSPACE`: SQL file is located in Databricks workspace. + * `GIT`: SQL file is located in cloud Git provider. + "enum": + - |- + WORKSPACE + - |- + GIT +github.com/databricks/databricks-sdk-go/service/jobs.SparkJarTask: + "jar_uri": + "description": |- + Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create. + "main_class_name": + "description": |- + The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. + + The code must use `SparkContext.getOrCreate` to obtain a Spark context; otherwise, runs of the job fail. + "parameters": + "description": |- + Parameters passed to the main method. + + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. +github.com/databricks/databricks-sdk-go/service/jobs.SparkPythonTask: + "parameters": + "description": |- + Command line parameters passed to the Python file. + + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. + "python_file": + "description": |- + The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required. + "source": + "description": |- + Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local + Databricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`, + the Python file will be retrieved from a Git repository defined in `git_source`. + + * `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI. + * `GIT`: The Python file is located in a remote Git repository. +github.com/databricks/databricks-sdk-go/service/jobs.SparkSubmitTask: + "parameters": + "description": |- + Command-line parameters passed to spark submit. 
+ + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. +github.com/databricks/databricks-sdk-go/service/jobs.SqlTask: + "alert": + "description": |- + If alert, indicates that this job must refresh a SQL alert. + "dashboard": + "description": |- + If dashboard, indicates that this job must refresh a SQL dashboard. + "file": + "description": |- + If file, indicates that this job runs a SQL file in a remote Git repository. + "parameters": + "description": |- + Parameters to be used for each run of this job. The SQL alert task does not support custom parameters. + "query": + "description": |- + If query, indicates that this job must execute a SQL query. + "warehouse_id": + "description": |- + The canonical identifier of the SQL warehouse. Recommended to use with serverless or pro SQL warehouses. Classic SQL warehouses are only supported for SQL alert, dashboard and query tasks and are limited to scheduled single-task jobs. +github.com/databricks/databricks-sdk-go/service/jobs.SqlTaskAlert: + "alert_id": + "description": |- + The canonical identifier of the SQL alert. + "pause_subscriptions": + "description": |- + If true, the alert notifications are not sent to subscribers. + "subscriptions": + "description": |- + If specified, alert notifications are sent to subscribers. +github.com/databricks/databricks-sdk-go/service/jobs.SqlTaskDashboard: + "custom_subject": + "description": |- + Subject of the email sent to subscribers of this task. + "dashboard_id": + "description": |- + The canonical identifier of the SQL dashboard. + "pause_subscriptions": + "description": |- + If true, the dashboard snapshot is not taken, and emails are not sent to subscribers. + "subscriptions": + "description": |- + If specified, dashboard snapshots are sent to subscriptions. +github.com/databricks/databricks-sdk-go/service/jobs.SqlTaskFile: + "path": + "description": |- + Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths. + "source": + "description": |- + Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved + from the local Databricks workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository + defined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise. + + * `WORKSPACE`: SQL file is located in Databricks workspace. + * `GIT`: SQL file is located in cloud Git provider. +github.com/databricks/databricks-sdk-go/service/jobs.SqlTaskQuery: + "query_id": + "description": |- + The canonical identifier of the SQL query. +github.com/databricks/databricks-sdk-go/service/jobs.SqlTaskSubscription: + "destination_id": + "description": |- + The canonical identifier of the destination to receive email notification. This parameter is mutually exclusive with user_name. You cannot set both destination_id and user_name for subscription notifications. + "user_name": + "description": |- + The user name to receive the subscription email. This parameter is mutually exclusive with destination_id. You cannot set both destination_id and user_name for subscription notifications. +github.com/databricks/databricks-sdk-go/service/jobs.TableUpdateTriggerConfiguration: + "condition": + "description": |- + The table(s) condition based on which to trigger a job run. 
+ "min_time_between_triggers_seconds": + "description": |- + If set, the trigger starts a run only after the specified amount of time has passed since + the last time the trigger fired. The minimum allowed value is 60 seconds. + "table_names": + "description": |- + A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`. + "wait_after_last_change_seconds": + "description": |- + If set, the trigger starts a run only after no table updates have occurred for the specified time + and can be used to wait for a series of table updates before triggering a run. The + minimum allowed value is 60 seconds. +github.com/databricks/databricks-sdk-go/service/jobs.Task: + "clean_rooms_notebook_task": + "description": |- + The task runs a [clean rooms](https://docs.databricks.com/en/clean-rooms/index.html) notebook + when the `clean_rooms_notebook_task` field is present. + "condition_task": + "description": |- + The task evaluates a condition that can be used to control the execution of other tasks when the `condition_task` field is present. + The condition task does not require a cluster to execute and does not support retries or notifications. + "dbt_task": + "description": |- + The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse. + "depends_on": + "description": |- + An optional array of objects specifying the dependency graph of the task. All tasks specified in this field must complete before executing this task. The task will run only if the `run_if` condition is true. + The key is `task_key`, and the value is the name assigned to the dependent task. + "description": + "description": |- + An optional description for this task. + "disable_auto_optimization": + "description": |- + An option to disable auto optimization in serverless + "email_notifications": + "description": |- + An optional set of email addresses that is notified when runs of this task begin or complete as well as when this task is deleted. The default behavior is to not send any emails. + "environment_key": + "description": |- + The key that references an environment spec in a job. This field is required for Python script, Python wheel and dbt tasks when using serverless compute. + "existing_cluster_id": + "description": |- + If existing_cluster_id, the ID of an existing cluster that is used for all runs. + When running jobs or tasks on an existing cluster, you may need to manually restart + the cluster if it stops responding. We suggest running jobs and tasks on new clusters for + greater reliability + "for_each_task": + "description": |- + The task executes a nested task for every input provided when the `for_each_task` field is present. + "health": {} + "job_cluster_key": + "description": |- + If job_cluster_key, this task is executed reusing the cluster specified in `job.settings.job_clusters`. + "libraries": + "description": |- + An optional list of libraries to be installed on the cluster. + The default value is an empty list. + "max_retries": + "description": |- + An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with the `FAILED` result_state or `INTERNAL_ERROR` `life_cycle_state`. The value `-1` means to retry indefinitely and the value `0` means to never retry. 
+  "min_retry_interval_millis":
+    "description": |-
+      An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
+  "new_cluster":
+    "description": |-
+      If new_cluster, a description of a new cluster that is created for each run.
+  "notebook_task":
+    "description": |-
+      The task runs a notebook when the `notebook_task` field is present.
+  "notification_settings":
+    "description": |-
+      Optional notification settings that are used when sending notifications to each of the `email_notifications` and `webhook_notifications` for this task.
+  "pipeline_task":
+    "description": |-
+      The task triggers a pipeline update when the `pipeline_task` field is present. Only pipelines configured to use triggered mode are supported.
+  "python_wheel_task":
+    "description": |-
+      The task runs a Python wheel when the `python_wheel_task` field is present.
+  "retry_on_timeout":
+    "description": |-
+      An optional policy to specify whether to retry a job when it times out. The default behavior
+      is to not retry on timeout.
+  "run_if":
+    "description": |-
+      An optional value specifying the condition determining whether the task is run once its dependencies have been completed.
+
+      * `ALL_SUCCESS`: All dependencies have executed and succeeded
+      * `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded
+      * `NONE_FAILED`: None of the dependencies have failed and at least one was executed
+      * `ALL_DONE`: All dependencies have been completed
+      * `AT_LEAST_ONE_FAILED`: At least one dependency failed
+      * `ALL_FAILED`: All dependencies have failed
+  "run_job_task":
+    "description": |-
+      The task triggers another job when the `run_job_task` field is present.
+  "spark_jar_task":
+    "description": |-
+      The task runs a JAR when the `spark_jar_task` field is present.
+  "spark_python_task":
+    "description": |-
+      The task runs a Python file when the `spark_python_task` field is present.
+  "spark_submit_task":
+    "description": |-
+      (Legacy) The task runs the spark-submit script when the `spark_submit_task` field is present. This task can run only on new clusters and is not compatible with serverless compute.
+
+      In the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations.
+
+      `master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.
+
+      By default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.
+
+      The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.
+  "sql_task":
+    "description": |-
+      The task runs a SQL query or file, or it refreshes a SQL alert or a legacy SQL dashboard when the `sql_task` field is present.
+  "task_key":
+    "description": |-
+      A unique name for the task. This field is used to refer to this task from other tasks.
+      This field is required and must be unique within its parent job.
+      On Update or Reset, this field is used to reference the tasks to be updated or reset.
+  "timeout_seconds":
+    "description": |-
+      An optional timeout applied to each run of this job task. A value of `0` means no timeout.
+ "webhook_notifications": + "description": |- + A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications. +github.com/databricks/databricks-sdk-go/service/jobs.TaskDependency: + "outcome": + "description": |- + Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. + "task_key": + "description": |- + The name of the task this task depends on. +github.com/databricks/databricks-sdk-go/service/jobs.TaskEmailNotifications: + "no_alert_for_skipped_runs": + "description": |- + If true, do not send email to recipients specified in `on_failure` if the run is skipped. + This field is `deprecated`. Please use the `notification_settings.no_alert_for_skipped_runs` field. + "on_duration_warning_threshold_exceeded": + "description": |- + A list of email addresses to be notified when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is specified in the `health` field for the job, notifications are not sent. + "on_failure": + "description": |- + A list of email addresses to be notified when a run unsuccessfully completes. A run is considered to have completed unsuccessfully if it ends with an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED`, or `TIMED_OUT` result_state. If this is not specified on job creation, reset, or update the list is empty, and notifications are not sent. + "on_start": + "description": |- + A list of email addresses to be notified when a run begins. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent. + "on_streaming_backlog_exceeded": + "description": |- + A list of email addresses to notify when any streaming backlog thresholds are exceeded for any stream. + Streaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`. + Alerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes. + "on_success": + "description": |- + A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent. +github.com/databricks/databricks-sdk-go/service/jobs.TaskNotificationSettings: + "alert_on_last_attempt": + "description": |- + If true, do not send notifications to recipients specified in `on_start` for the retried runs and do not send notifications to recipients specified in `on_failure` until the last retry of the run. + "no_alert_for_canceled_runs": + "description": |- + If true, do not send notifications to recipients specified in `on_failure` if the run is canceled. + "no_alert_for_skipped_runs": + "description": |- + If true, do not send notifications to recipients specified in `on_failure` if the run is skipped. +github.com/databricks/databricks-sdk-go/service/jobs.TriggerSettings: + "file_arrival": + "description": |- + File arrival trigger settings. + "pause_status": + "description": |- + Whether this trigger is paused or not. + "periodic": + "description": |- + Periodic trigger settings. 
+ "table": + "description": |- + Old table trigger settings name. Deprecated in favor of `table_update`. + "table_update": {} +github.com/databricks/databricks-sdk-go/service/jobs.Webhook: + "id": {} +github.com/databricks/databricks-sdk-go/service/jobs.WebhookNotifications: + "on_duration_warning_threshold_exceeded": + "description": |- + An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property. + "on_failure": + "description": |- + An optional list of system notification IDs to call when the run fails. A maximum of 3 destinations can be specified for the `on_failure` property. + "on_start": + "description": |- + An optional list of system notification IDs to call when the run starts. A maximum of 3 destinations can be specified for the `on_start` property. + "on_streaming_backlog_exceeded": + "description": |- + An optional list of system notification IDs to call when any streaming backlog thresholds are exceeded for any stream. + Streaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`. + Alerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes. + A maximum of 3 destinations can be specified for the `on_streaming_backlog_exceeded` property. + "on_success": + "description": |- + An optional list of system notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified for the `on_success` property. +github.com/databricks/databricks-sdk-go/service/ml.ExperimentTag: + "key": + "description": |- + The tag key. + "value": + "description": |- + The tag value. +github.com/databricks/databricks-sdk-go/service/ml.ModelTag: + "key": + "description": |- + The tag key. + "value": + "description": |- + The tag value. +github.com/databricks/databricks-sdk-go/service/ml.ModelVersion: + "creation_timestamp": + "description": |- + Timestamp recorded when this `model_version` was created. + "current_stage": + "description": |- + Current stage for this `model_version`. + "description": + "description": |- + Description of this `model_version`. + "last_updated_timestamp": + "description": |- + Timestamp recorded when metadata for this `model_version` was last updated. + "name": + "description": |- + Unique name of the model + "run_id": + "description": |- + MLflow run ID used when creating `model_version`, if `source` was generated by an + experiment run stored in MLflow tracking server. + "run_link": + "description": |- + Run Link: Direct link to the run that generated this version + "source": + "description": |- + URI indicating the location of the source model artifacts, used when creating `model_version` + "status": + "description": |- + Current status of `model_version` + "status_message": + "description": |- + Details on current `status`, if it is pending or failed. + "tags": + "description": |- + Tags: Additional metadata key-value pairs for this `model_version`. + "user_id": + "description": |- + User that created this `model_version`. + "version": + "description": |- + Model's version number. 
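Combining the health-rule and webhook annotations above, a job that alerts when a run exceeds a duration threshold could be sketched roughly as follows; the job name, threshold, and destination IDs are placeholders, and the destinations would have to already exist in the workspace:

```yaml
# Hypothetical databricks.yml fragment; the IDs below are placeholders.
resources:
  jobs:
    nightly_etl:
      name: nightly-etl
      health:
        rules:
          - metric: RUN_DURATION_SECONDS
            op: GREATER_THAN
            value: 3600
      webhook_notifications:
        # A maximum of 3 destinations per property.
        on_duration_warning_threshold_exceeded:
          - id: "11111111-2222-3333-4444-555555555555"
        on_failure:
          - id: "11111111-2222-3333-4444-555555555555"
```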
+github.com/databricks/databricks-sdk-go/service/ml.ModelVersionStatus: + "_": + "description": |- + Current status of `model_version` + "enum": + - |- + PENDING_REGISTRATION + - |- + FAILED_REGISTRATION + - |- + READY +github.com/databricks/databricks-sdk-go/service/ml.ModelVersionTag: + "key": + "description": |- + The tag key. + "value": + "description": |- + The tag value. +github.com/databricks/databricks-sdk-go/service/pipelines.CronTrigger: + "quartz_cron_schedule": {} + "timezone_id": {} +github.com/databricks/databricks-sdk-go/service/pipelines.DeploymentKind: + "_": + "description": | + The deployment method that manages the pipeline: + - BUNDLE: The pipeline is managed by a Databricks Asset Bundle. + "enum": + - |- + BUNDLE +github.com/databricks/databricks-sdk-go/service/pipelines.FileLibrary: + "path": + "description": |- + The absolute path of the file. +github.com/databricks/databricks-sdk-go/service/pipelines.Filters: + "exclude": + "description": |- + Paths to exclude. + "include": + "description": |- + Paths to include. +github.com/databricks/databricks-sdk-go/service/pipelines.IngestionConfig: + "report": + "description": |- + Select a specific source report. + "schema": + "description": |- + Select all tables from a specific source schema. + "table": + "description": |- + Select a specific source table. +github.com/databricks/databricks-sdk-go/service/pipelines.IngestionGatewayPipelineDefinition: + "connection_id": + "description": |- + [Deprecated, use connection_name instead] Immutable. The Unity Catalog connection that this gateway pipeline uses to communicate with the source. + "connection_name": + "description": |- + Immutable. The Unity Catalog connection that this gateway pipeline uses to communicate with the source. + "gateway_storage_catalog": + "description": |- + Required, Immutable. The name of the catalog for the gateway pipeline's storage location. + "gateway_storage_name": + "description": | + Optional. The Unity Catalog-compatible name for the gateway storage location. + This is the destination to use for the data that is extracted by the gateway. + Delta Live Tables system will automatically create the storage location under the catalog and schema. + "gateway_storage_schema": + "description": |- + Required, Immutable. The name of the schema for the gateway pipelines's storage location. +github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefinition: + "connection_name": + "description": |- + Immutable. The Unity Catalog connection that this ingestion pipeline uses to communicate with the source. This is used with connectors for applications like Salesforce, Workday, and so on. + "ingestion_gateway_id": + "description": |- + Immutable. Identifier for the gateway that is used by this ingestion pipeline to communicate with the source database. This is used with connectors to databases like SQL Server. + "objects": + "description": |- + Required. Settings specifying tables to replicate and the destination for the replicated tables. + "table_configuration": + "description": |- + Configuration settings to control the ingestion of tables. These settings are applied to all tables in the pipeline. +github.com/databricks/databricks-sdk-go/service/pipelines.ManualTrigger: {} +github.com/databricks/databricks-sdk-go/service/pipelines.NotebookLibrary: + "path": + "description": |- + The absolute path of the notebook. 
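The pipeline library annotations (`notebook` and `file` paths) map onto a bundle pipeline resource in the same shape. A minimal sketch, assuming a workspace path layout and `catalog`/`target` names that are purely illustrative and not defined in this section:

```yaml
# Hypothetical databricks.yml fragment; catalog, target, and paths are placeholders.
resources:
  pipelines:
    example_pipeline:
      name: example-pipeline
      catalog: main
      target: analytics
      libraries:
        - notebook:
            path: /Workspace/Users/someone@example.com/dlt/ingest
        - file:
            path: /Workspace/Users/someone@example.com/dlt/transforms.py
```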
+github.com/databricks/databricks-sdk-go/service/pipelines.Notifications: + "alerts": + "description": | + A list of alerts that trigger the sending of notifications to the configured + destinations. The supported alerts are: + + * `on-update-success`: A pipeline update completes successfully. + * `on-update-failure`: Each time a pipeline update fails. + * `on-update-fatal-failure`: A pipeline update fails with a non-retryable (fatal) error. + * `on-flow-failure`: A single data flow fails. + "email_recipients": + "description": | + A list of email addresses notified when a configured alert is triggered. +github.com/databricks/databricks-sdk-go/service/pipelines.PipelineCluster: + "apply_policy_default_values": + "description": |- + Note: This field won't be persisted. Only API users will check this field. + "autoscale": + "description": |- + Parameters needed in order to automatically scale clusters up and down based on load. + Note: autoscaling works best with DB runtime versions 3.0 or later. + "aws_attributes": + "description": |- + Attributes related to clusters running on Amazon Web Services. + If not specified at cluster creation, a set of default values will be used. + "azure_attributes": + "description": |- + Attributes related to clusters running on Microsoft Azure. + If not specified at cluster creation, a set of default values will be used. + "cluster_log_conf": + "description": | + The configuration for delivering spark logs to a long-term storage destination. + Only dbfs destinations are supported. Only one destination can be specified + for one cluster. If the conf is given, the logs will be delivered to the destination every + `5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while + the destination of executor logs is `$destination/$clusterId/executor`. + "custom_tags": + "description": |- + Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS + instances and EBS volumes) with these tags in addition to `default_tags`. Notes: + + - Currently, Databricks allows at most 45 custom tags + + - Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags + "driver_instance_pool_id": + "description": |- + The optional ID of the instance pool for the driver of the cluster belongs. + The pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not + assigned. + "driver_node_type_id": + "description": |- + The node type of the Spark driver. + Note that this field is optional; if unset, the driver node type will be set as the same value + as `node_type_id` defined above. + "enable_local_disk_encryption": + "description": |- + Whether to enable local disk encryption for the cluster. + "gcp_attributes": + "description": |- + Attributes related to clusters running on Google Cloud Platform. + If not specified at cluster creation, a set of default values will be used. + "init_scripts": + "description": |- + The configuration for storing init scripts. Any number of destinations can be specified. The scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified, init script logs are sent to `//init_scripts`. + "instance_pool_id": + "description": |- + The optional ID of the instance pool to which the cluster belongs. + "label": + "description": |- + A label for the cluster specification, either `default` to configure the default cluster, or `maintenance` to configure the maintenance cluster. This field is optional. 
The default value is `default`. + "node_type_id": + "description": | + This field encodes, through a single value, the resources available to each of + the Spark nodes in this cluster. For example, the Spark nodes can be provisioned + and optimized for memory or compute intensive workloads. A list of available node + types can be retrieved by using the :method:clusters/listNodeTypes API call. + "num_workers": + "description": |- + Number of worker nodes that this cluster should have. A cluster has one Spark Driver + and `num_workers` Executors for a total of `num_workers` + 1 Spark nodes. + + Note: When reading the properties of a cluster, this field reflects the desired number + of workers rather than the actual current number of workers. For instance, if a cluster + is resized from 5 to 10 workers, this field will immediately be updated to reflect + the target size of 10 workers, whereas the workers listed in `spark_info` will gradually + increase from 5 to 10 as the new nodes are provisioned. + "policy_id": + "description": |- + The ID of the cluster policy used to create the cluster if applicable. + "spark_conf": + "description": | + An object containing a set of optional, user-specified Spark configuration key-value pairs. + See :method:clusters/create for more details. + "spark_env_vars": + "description": |- + An object containing a set of optional, user-specified environment variable key-value pairs. + Please note that key-value pair of the form (X,Y) will be exported as is (i.e., + `export X='Y'`) while launching the driver and workers. + + In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending + them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all + default databricks managed environmental variables are included as well. + + Example Spark environment variables: + `{"SPARK_WORKER_MEMORY": "28000m", "SPARK_LOCAL_DIRS": "/local_disk0"}` or + `{"SPARK_DAEMON_JAVA_OPTS": "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}` + "ssh_public_keys": + "description": |- + SSH public key contents that will be added to each Spark node in this cluster. The + corresponding private keys can be used to login with the user name `ubuntu` on port `2200`. + Up to 10 keys can be specified. +github.com/databricks/databricks-sdk-go/service/pipelines.PipelineClusterAutoscale: + "max_workers": + "description": |- + The maximum number of workers to which the cluster can scale up when overloaded. `max_workers` must be strictly greater than `min_workers`. + "min_workers": + "description": |- + The minimum number of workers the cluster can scale down to when underutilized. + It is also the initial number of workers the cluster will have after creation. + "mode": + "description": | + Databricks Enhanced Autoscaling optimizes cluster utilization by automatically + allocating cluster resources based on workload volume, with minimal impact to + the data processing latency of your pipelines. Enhanced Autoscaling is available + for `updates` clusters only. The legacy autoscaling feature is used for `maintenance` + clusters. +github.com/databricks/databricks-sdk-go/service/pipelines.PipelineClusterAutoscaleMode: + "_": + "description": | + Databricks Enhanced Autoscaling optimizes cluster utilization by automatically + allocating cluster resources based on workload volume, with minimal impact to + the data processing latency of your pipelines. Enhanced Autoscaling is available + for `updates` clusters only. 
The legacy autoscaling feature is used for `maintenance` + clusters. + "enum": + - |- + ENHANCED + - |- + LEGACY +github.com/databricks/databricks-sdk-go/service/pipelines.PipelineDeployment: + "kind": + "description": |- + The deployment method that manages the pipeline. + "metadata_file_path": + "description": |- + The path to the file containing metadata about the deployment. +github.com/databricks/databricks-sdk-go/service/pipelines.PipelineLibrary: + "file": + "description": | + The path to a file that defines a pipeline and is stored in the Databricks Repos. + "jar": + "description": | + URI of the jar to be installed. Currently only DBFS is supported. + "maven": + "description": | + Specification of a maven library to be installed. + "notebook": + "description": | + The path to a notebook that defines a pipeline and is stored in the Databricks workspace. + "whl": + "description": |- + URI of the whl to be installed. +github.com/databricks/databricks-sdk-go/service/pipelines.PipelineTrigger: + "cron": {} + "manual": {} +github.com/databricks/databricks-sdk-go/service/pipelines.ReportSpec: + "destination_catalog": + "description": |- + Required. Destination catalog to store table. + "destination_schema": + "description": |- + Required. Destination schema to store table. + "destination_table": + "description": |- + Required. Destination table name. The pipeline fails if a table with that name already exists. + "source_url": + "description": |- + Required. Report URL in the source system. + "table_configuration": + "description": |- + Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the IngestionPipelineDefinition object. +github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindow: + "days_of_week": + "description": |- + Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour). + If not specified all days of the week will be used. + "start_hour": + "description": |- + An integer between 0 and 23 denoting the start hour for the restart window in the 24-hour day. + Continuous pipeline restart is triggered only within a five-hour window starting at this hour. + "time_zone_id": + "description": |- + Time zone id of restart window. See https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html for details. + If not specified, UTC will be used. +github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindowDaysOfWeek: + "_": + "description": |- + Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour). + If not specified all days of the week will be used. + "enum": + - |- + MONDAY + - |- + TUESDAY + - |- + WEDNESDAY + - |- + THURSDAY + - |- + FRIDAY + - |- + SATURDAY + - |- + SUNDAY +github.com/databricks/databricks-sdk-go/service/pipelines.SchemaSpec: + "destination_catalog": + "description": |- + Required. Destination catalog to store tables. + "destination_schema": + "description": |- + Required. Destination schema to store tables in. Tables with the same name as the source tables are created in this destination schema. The pipeline fails If a table with the same name already exists. + "source_catalog": + "description": |- + The source catalog name. Might be optional depending on the type of source. + "source_schema": + "description": |- + Required. Schema name in the source database. 
+ "table_configuration": + "description": |- + Configuration settings to control the ingestion of tables. These settings are applied to all tables in this schema and override the table_configuration defined in the IngestionPipelineDefinition object. +github.com/databricks/databricks-sdk-go/service/pipelines.TableSpec: + "destination_catalog": + "description": |- + Required. Destination catalog to store table. + "destination_schema": + "description": |- + Required. Destination schema to store table. + "destination_table": + "description": |- + Optional. Destination table name. The pipeline fails if a table with that name already exists. If not set, the source table name is used. + "source_catalog": + "description": |- + Source catalog name. Might be optional depending on the type of source. + "source_schema": + "description": |- + Schema name in the source database. Might be optional depending on the type of source. + "source_table": + "description": |- + Required. Table name in the source database. + "table_configuration": + "description": |- + Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the IngestionPipelineDefinition object and the SchemaSpec. +github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfig: + "primary_keys": + "description": |- + The primary key of the table used to apply changes. + "salesforce_include_formula_fields": + "description": |- + If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector + "scd_type": + "description": |- + The SCD type to use to ingest the table. + "sequence_by": + "description": |- + The column names specifying the logical order of events in the source data. Delta Live Tables uses this sequencing to handle change events that arrive out of order. +github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfigScdType: + "_": + "description": |- + The SCD type to use to ingest the table. + "enum": + - |- + SCD_TYPE_1 + - |- + SCD_TYPE_2 +github.com/databricks/databricks-sdk-go/service/serving.Ai21LabsConfig: + "ai21labs_api_key": + "description": |- + The Databricks secret key reference for an AI21 Labs API key. If you prefer to paste your API key directly, see `ai21labs_api_key_plaintext`. You must provide an API key using one of the following fields: `ai21labs_api_key` or `ai21labs_api_key_plaintext`. + "ai21labs_api_key_plaintext": + "description": |- + An AI21 Labs API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `ai21labs_api_key`. You must provide an API key using one of the following fields: `ai21labs_api_key` or `ai21labs_api_key_plaintext`. +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayConfig: + "guardrails": + "description": |- + Configuration for AI Guardrails to prevent unwanted data and unsafe data in requests and responses. + "inference_table_config": + "description": |- + Configuration for payload logging using inference tables. Use these tables to monitor and audit data being sent to and received from model APIs and to improve model quality. + "rate_limits": + "description": |- + Configuration for rate limits which can be set to limit endpoint traffic. + "usage_tracking_config": + "description": |- + Configuration to enable usage tracking using system tables. These tables allow you to monitor operational usage on endpoints and their associated costs. 
+github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailParameters: + "invalid_keywords": + "description": |- + List of invalid keywords. AI guardrail uses keyword or string matching to decide if the keyword exists in the request or response content. + "pii": + "description": |- + Configuration for guardrail PII filter. + "safety": + "description": |- + Indicates whether the safety filter is enabled. + "valid_topics": + "description": |- + The list of allowed topics. Given a chat request, this guardrail flags the request if its topic is not in the allowed topics. +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehavior: + "behavior": + "description": |- + Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned. +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehaviorBehavior: + "_": + "description": |- + Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned. + "enum": + - |- + NONE + - |- + BLOCK +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrails: + "input": + "description": |- + Configuration for input guardrail filters. + "output": + "description": |- + Configuration for output guardrail filters. +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayInferenceTableConfig: + "catalog_name": + "description": |- + The name of the catalog in Unity Catalog. Required when enabling inference tables. NOTE: On update, you have to disable inference table first in order to change the catalog name. + "enabled": + "description": |- + Indicates whether the inference table is enabled. + "schema_name": + "description": |- + The name of the schema in Unity Catalog. Required when enabling inference tables. NOTE: On update, you have to disable inference table first in order to change the schema name. + "table_name_prefix": + "description": |- + The prefix of the table in Unity Catalog. NOTE: On update, you have to disable inference table first in order to change the prefix name. +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimit: + "calls": + "description": |- + Used to specify how many calls are allowed for a key within the renewal_period. + "key": + "description": |- + Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified. + "renewal_period": + "description": |- + Renewal period field for a rate limit. Currently, only 'minute' is supported. +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitKey: + "_": + "description": |- + Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified. + "enum": + - |- + user + - |- + endpoint +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitRenewalPeriod: + "_": + "description": |- + Renewal period field for a rate limit. 
Currently, only 'minute' is supported. + "enum": + - |- + minute +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayUsageTrackingConfig: + "enabled": + "description": |- + Whether to enable usage tracking. +github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfig: + "aws_access_key_id": + "description": |- + The Databricks secret key reference for an AWS access key ID with permissions to interact with Bedrock services. If you prefer to paste your API key directly, see `aws_access_key_id`. You must provide an API key using one of the following fields: `aws_access_key_id` or `aws_access_key_id_plaintext`. + "aws_access_key_id_plaintext": + "description": |- + An AWS access key ID with permissions to interact with Bedrock services provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `aws_access_key_id`. You must provide an API key using one of the following fields: `aws_access_key_id` or `aws_access_key_id_plaintext`. + "aws_region": + "description": |- + The AWS region to use. Bedrock has to be enabled there. + "aws_secret_access_key": + "description": |- + The Databricks secret key reference for an AWS secret access key paired with the access key ID, with permissions to interact with Bedrock services. If you prefer to paste your API key directly, see `aws_secret_access_key_plaintext`. You must provide an API key using one of the following fields: `aws_secret_access_key` or `aws_secret_access_key_plaintext`. + "aws_secret_access_key_plaintext": + "description": |- + An AWS secret access key paired with the access key ID, with permissions to interact with Bedrock services provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `aws_secret_access_key`. You must provide an API key using one of the following fields: `aws_secret_access_key` or `aws_secret_access_key_plaintext`. + "bedrock_provider": + "description": |- + The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon. +github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedrockProvider: + "_": + "description": |- + The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon. + "enum": + - |- + anthropic + - |- + cohere + - |- + ai21labs + - |- + amazon +github.com/databricks/databricks-sdk-go/service/serving.AnthropicConfig: + "anthropic_api_key": + "description": |- + The Databricks secret key reference for an Anthropic API key. If you prefer to paste your API key directly, see `anthropic_api_key_plaintext`. You must provide an API key using one of the following fields: `anthropic_api_key` or `anthropic_api_key_plaintext`. + "anthropic_api_key_plaintext": + "description": |- + The Anthropic API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `anthropic_api_key`. You must provide an API key using one of the following fields: `anthropic_api_key` or `anthropic_api_key_plaintext`. +github.com/databricks/databricks-sdk-go/service/serving.AutoCaptureConfigInput: + "catalog_name": + "description": |- + The name of the catalog in Unity Catalog. NOTE: On update, you cannot change the catalog name if the inference table is already enabled. + "enabled": + "description": |- + Indicates whether the inference table is enabled. + "schema_name": + "description": |- + The name of the schema in Unity Catalog. 
NOTE: On update, you cannot change the schema name if the inference table is already enabled. + "table_name_prefix": + "description": |- + The prefix of the table in Unity Catalog. NOTE: On update, you cannot change the prefix name if the inference table is already enabled. +github.com/databricks/databricks-sdk-go/service/serving.CohereConfig: + "cohere_api_base": + "description": "This is an optional field to provide a customized base URL for the Cohere API. \nIf left unspecified, the standard Cohere base URL is used.\n" + "cohere_api_key": + "description": |- + The Databricks secret key reference for a Cohere API key. If you prefer to paste your API key directly, see `cohere_api_key_plaintext`. You must provide an API key using one of the following fields: `cohere_api_key` or `cohere_api_key_plaintext`. + "cohere_api_key_plaintext": + "description": |- + The Cohere API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `cohere_api_key`. You must provide an API key using one of the following fields: `cohere_api_key` or `cohere_api_key_plaintext`. +github.com/databricks/databricks-sdk-go/service/serving.DatabricksModelServingConfig: + "databricks_api_token": + "description": | + The Databricks secret key reference for a Databricks API token that corresponds to a user or service + principal with Can Query access to the model serving endpoint pointed to by this external model. + If you prefer to paste your API key directly, see `databricks_api_token_plaintext`. + You must provide an API key using one of the following fields: `databricks_api_token` or `databricks_api_token_plaintext`. + "databricks_api_token_plaintext": + "description": | + The Databricks API token that corresponds to a user or service + principal with Can Query access to the model serving endpoint pointed to by this external model provided as a plaintext string. + If you prefer to reference your key using Databricks Secrets, see `databricks_api_token`. + You must provide an API key using one of the following fields: `databricks_api_token` or `databricks_api_token_plaintext`. + "databricks_workspace_url": + "description": | + The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model. +github.com/databricks/databricks-sdk-go/service/serving.EndpointCoreConfigInput: + "auto_capture_config": + "description": |- + Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog. + "served_entities": + "description": |- + A list of served entities for the endpoint to serve. A serving endpoint can have up to 15 served entities. + "served_models": + "description": |- + (Deprecated, use served_entities instead) A list of served models for the endpoint to serve. A serving endpoint can have up to 15 served models. + "traffic_config": + "description": |- + The traffic config defining how invocations to the serving endpoint should be routed. +github.com/databricks/databricks-sdk-go/service/serving.EndpointTag: + "key": + "description": |- + Key field for a serving endpoint tag. + "value": + "description": |- + Optional value field for a serving endpoint tag. +github.com/databricks/databricks-sdk-go/service/serving.ExternalModel: + "ai21labs_config": + "description": |- + AI21Labs Config. Only required if the provider is 'ai21labs'. + "amazon_bedrock_config": + "description": |- + Amazon Bedrock Config. Only required if the provider is 'amazon-bedrock'. 
+ "anthropic_config": + "description": |- + Anthropic Config. Only required if the provider is 'anthropic'. + "cohere_config": + "description": |- + Cohere Config. Only required if the provider is 'cohere'. + "databricks_model_serving_config": + "description": |- + Databricks Model Serving Config. Only required if the provider is 'databricks-model-serving'. + "google_cloud_vertex_ai_config": + "description": |- + Google Cloud Vertex AI Config. Only required if the provider is 'google-cloud-vertex-ai'. + "name": + "description": |- + The name of the external model. + "openai_config": + "description": |- + OpenAI Config. Only required if the provider is 'openai'. + "palm_config": + "description": |- + PaLM Config. Only required if the provider is 'palm'. + "provider": + "description": | + The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic', + 'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.", + "task": + "description": |- + The task type of the external model. +github.com/databricks/databricks-sdk-go/service/serving.ExternalModelProvider: + "_": + "description": | + The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic', + 'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.", + "enum": + - |- + ai21labs + - |- + anthropic + - |- + amazon-bedrock + - |- + cohere + - |- + databricks-model-serving + - |- + google-cloud-vertex-ai + - |- + openai + - |- + palm +github.com/databricks/databricks-sdk-go/service/serving.GoogleCloudVertexAiConfig: + "private_key": + "description": |- + The Databricks secret key reference for a private key for the service account which has access to the Google Cloud Vertex AI Service. See [Best practices for managing service account keys](https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys). If you prefer to paste your API key directly, see `private_key_plaintext`. You must provide an API key using one of the following fields: `private_key` or `private_key_plaintext` + "private_key_plaintext": + "description": |- + The private key for the service account which has access to the Google Cloud Vertex AI Service provided as a plaintext secret. See [Best practices for managing service account keys](https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys). If you prefer to reference your key using Databricks Secrets, see `private_key`. You must provide an API key using one of the following fields: `private_key` or `private_key_plaintext`. + "project_id": + "description": |- + This is the Google Cloud project id that the service account is associated with. + "region": + "description": |- + This is the region for the Google Cloud Vertex AI Service. See [supported regions](https://cloud.google.com/vertex-ai/docs/general/locations) for more details. Some models are only available in specific regions. +github.com/databricks/databricks-sdk-go/service/serving.OpenAiConfig: + "microsoft_entra_client_id": + "description": | + This field is only required for Azure AD OpenAI and is the Microsoft Entra Client ID. + "microsoft_entra_client_secret": + "description": | + The Databricks secret key reference for a client secret used for Microsoft Entra ID authentication. + If you prefer to paste your client secret directly, see `microsoft_entra_client_secret_plaintext`. 
+ You must provide an API key using one of the following fields: `microsoft_entra_client_secret` or `microsoft_entra_client_secret_plaintext`. + "microsoft_entra_client_secret_plaintext": + "description": | + The client secret used for Microsoft Entra ID authentication provided as a plaintext string. + If you prefer to reference your key using Databricks Secrets, see `microsoft_entra_client_secret`. + You must provide an API key using one of the following fields: `microsoft_entra_client_secret` or `microsoft_entra_client_secret_plaintext`. + "microsoft_entra_tenant_id": + "description": | + This field is only required for Azure AD OpenAI and is the Microsoft Entra Tenant ID. + "openai_api_base": + "description": | + This is a field to provide a customized base URl for the OpenAI API. + For Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service + provided by Azure. + For other OpenAI API types, this field is optional, and if left unspecified, the standard OpenAI base URL is used. + "openai_api_key": + "description": |- + The Databricks secret key reference for an OpenAI API key using the OpenAI or Azure service. If you prefer to paste your API key directly, see `openai_api_key_plaintext`. You must provide an API key using one of the following fields: `openai_api_key` or `openai_api_key_plaintext`. + "openai_api_key_plaintext": + "description": |- + The OpenAI API key using the OpenAI or Azure service provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `openai_api_key`. You must provide an API key using one of the following fields: `openai_api_key` or `openai_api_key_plaintext`. + "openai_api_type": + "description": | + This is an optional field to specify the type of OpenAI API to use. + For Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security + access validation protocol. For access token validation, use azure. For authentication using Azure Active + Directory (Azure AD) use, azuread. + "openai_api_version": + "description": | + This is an optional field to specify the OpenAI API version. + For Azure OpenAI, this field is required, and is the version of the Azure OpenAI service to + utilize, specified by a date. + "openai_deployment_name": + "description": | + This field is only required for Azure OpenAI and is the name of the deployment resource for the + Azure OpenAI service. + "openai_organization": + "description": | + This is an optional field to specify the organization in OpenAI or Azure OpenAI. +github.com/databricks/databricks-sdk-go/service/serving.PaLmConfig: + "palm_api_key": + "description": |- + The Databricks secret key reference for a PaLM API key. If you prefer to paste your API key directly, see `palm_api_key_plaintext`. You must provide an API key using one of the following fields: `palm_api_key` or `palm_api_key_plaintext`. + "palm_api_key_plaintext": + "description": |- + The PaLM API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `palm_api_key`. You must provide an API key using one of the following fields: `palm_api_key` or `palm_api_key_plaintext`. +github.com/databricks/databricks-sdk-go/service/serving.RateLimit: + "calls": + "description": |- + Used to specify how many calls are allowed for a key within the renewal_period. + "key": + "description": |- + Key field for a serving endpoint rate limit. 
Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified. + "renewal_period": + "description": |- + Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported. +github.com/databricks/databricks-sdk-go/service/serving.RateLimitKey: + "_": + "description": |- + Key field for a serving endpoint rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified. + "enum": + - |- + user + - |- + endpoint +github.com/databricks/databricks-sdk-go/service/serving.RateLimitRenewalPeriod: + "_": + "description": |- + Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported. + "enum": + - |- + minute +github.com/databricks/databricks-sdk-go/service/serving.Route: + "served_model_name": + "description": |- + The name of the served model this route configures traffic for. + "traffic_percentage": + "description": |- + The percentage of endpoint traffic to send to this route. It must be an integer between 0 and 100 inclusive. +github.com/databricks/databricks-sdk-go/service/serving.ServedEntityInput: + "entity_name": + "description": | + The name of the entity to be served. The entity may be a model in the Databricks Model Registry, a model in the Unity Catalog (UC), + or a function of type FEATURE_SPEC in the UC. If it is a UC object, the full name of the object should be given in the form of + __catalog_name__.__schema_name__.__model_name__. + "entity_version": + "description": |- + The version of the model in Databricks Model Registry to be served or empty if the entity is a FEATURE_SPEC. + "environment_vars": + "description": "An object containing a set of optional, user-specified environment variable key-value pairs used for serving this entity.\nNote: this is an experimental feature and subject to change. \nExample entity environment variables that refer to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`" + "external_model": + "description": | + The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled) + can be specified with the latter set being used for custom model serving for a Databricks registered model. For an existing endpoint with external_model, + it cannot be updated to an endpoint without external_model. If the endpoint is created without external_model, users cannot update it to add external_model later. + The task type of all external models within an endpoint must be the same. + "instance_profile_arn": + "description": |- + ARN of the instance profile that the served entity uses to access AWS resources. + "max_provisioned_throughput": + "description": |- + The maximum tokens per second that the endpoint can scale up to. + "min_provisioned_throughput": + "description": |- + The minimum tokens per second that the endpoint can scale down to. + "name": + "description": | + The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores. + If not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other + entities, it defaults to -. + "scale_to_zero_enabled": + "description": |- + Whether the compute resources for the served entity should scale down to zero. 
+ "workload_size": + "description": | + The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. + A single unit of provisioned concurrency can process one request at a time. + Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). + If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0. + "workload_type": + "description": | + The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is + "CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. + See the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types). +github.com/databricks/databricks-sdk-go/service/serving.ServedModelInput: + "environment_vars": + "description": "An object containing a set of optional, user-specified environment variable key-value pairs used for serving this model.\nNote: this is an experimental feature and subject to change. \nExample model environment variables that refer to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`" + "instance_profile_arn": + "description": |- + ARN of the instance profile that the served model will use to access AWS resources. + "max_provisioned_throughput": + "description": |- + The maximum tokens per second that the endpoint can scale up to. + "min_provisioned_throughput": + "description": |- + The minimum tokens per second that the endpoint can scale down to. + "model_name": + "description": | + The name of the model in Databricks Model Registry to be served or if the model resides in Unity Catalog, the full name of model, + in the form of __catalog_name__.__schema_name__.__model_name__. + "model_version": + "description": |- + The version of the model in Databricks Model Registry or Unity Catalog to be served. + "name": + "description": | + The name of a served model. It must be unique across an endpoint. If not specified, this field will default to -. + A served model name can consist of alphanumeric characters, dashes, and underscores. + "scale_to_zero_enabled": + "description": |- + Whether the compute resources for the served model should scale down to zero. + "workload_size": + "description": | + The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between. + A single unit of provisioned concurrency can process one request at a time. + Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). + If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0. + "workload_type": + "description": | + The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is + "CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. 
+ See the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types). +github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadSize: + "_": + "description": | + The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between. + A single unit of provisioned concurrency can process one request at a time. + Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). + If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0. + "enum": + - |- + Small + - |- + Medium + - |- + Large +github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadType: + "_": + "description": | + The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is + "CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. + See the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types). + "enum": + - |- + CPU + - |- + GPU_SMALL + - |- + GPU_MEDIUM + - |- + GPU_LARGE + - |- + MULTIGPU_MEDIUM +github.com/databricks/databricks-sdk-go/service/serving.TrafficConfig: + "routes": + "description": |- + The list of routes that define traffic to each served entity. diff --git a/bundle/internal/schema/annotations_openapi_overrides.yml b/bundle/internal/schema/annotations_openapi_overrides.yml new file mode 100644 index 000000000..ef602d6ef --- /dev/null +++ b/bundle/internal/schema/annotations_openapi_overrides.yml @@ -0,0 +1,161 @@ +github.com/databricks/cli/bundle/config/resources.Cluster: + "data_security_mode": + "description": |- + PLACEHOLDER + "docker_image": + "description": |- + PLACEHOLDER + "kind": + "description": |- + PLACEHOLDER + "permissions": + "description": |- + PLACEHOLDER + "runtime_engine": + "description": |- + PLACEHOLDER + "workload_type": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.Dashboard: + "embed_credentials": + "description": |- + PLACEHOLDER + "file_path": + "description": |- + PLACEHOLDER + "permissions": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.Job: + "health": + "description": |- + PLACEHOLDER + "permissions": + "description": |- + PLACEHOLDER + "run_as": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.MlflowExperiment: + "permissions": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.MlflowModel: + "permissions": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.ModelServingEndpoint: + "permissions": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.Pipeline: + "permissions": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.QualityMonitor: + "table_name": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.RegisteredModel: + "grants": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.Schema: + "grants": + "description": |- + PLACEHOLDER + "properties": + "description": |- + PLACEHOLDER 
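+# Example only (illustrative, not part of the actual override file): a PLACEHOLDER entry
+# is later replaced by hand with a real description, for instance:
+#
+#   github.com/databricks/cli/bundle/config/resources.Schema:
+#     "grants":
+#       "description": |-
+#         The grants to apply to the schema.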
+github.com/databricks/cli/bundle/config/resources.Volume: + "grants": + "description": |- + PLACEHOLDER + "volume_type": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/compute.AwsAttributes: + "availability": + "description": |- + PLACEHOLDER + "ebs_volume_type": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/compute.AzureAttributes: + "availability": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec: + "data_security_mode": + "description": |- + PLACEHOLDER + "docker_image": + "description": |- + PLACEHOLDER + "kind": + "description": |- + PLACEHOLDER + "runtime_engine": + "description": |- + PLACEHOLDER + "workload_type": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/compute.DockerImage: + "basic_auth": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/compute.GcpAttributes: + "availability": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/jobs.GitSource: + "git_snapshot": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/jobs.JobEnvironment: + "spec": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/jobs.JobsHealthRule: + "metric": + "description": |- + PLACEHOLDER + "op": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/jobs.JobsHealthRules: + "rules": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask: + "python_named_params": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/jobs.Task: + "health": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/jobs.TriggerSettings: + "table_update": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/jobs.Webhook: + "id": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/pipelines.CronTrigger: + "quartz_cron_schedule": + "description": |- + PLACEHOLDER + "timezone_id": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/pipelines.PipelineTrigger: + "cron": + "description": |- + PLACEHOLDER + "manual": + "description": |- + PLACEHOLDER diff --git a/bundle/internal/schema/annotations_test.go b/bundle/internal/schema/annotations_test.go new file mode 100644 index 000000000..d7e2fea7c --- /dev/null +++ b/bundle/internal/schema/annotations_test.go @@ -0,0 +1,44 @@ +package main + +import ( + "testing" +) + +func TestConvertLinksToAbsoluteUrl(t *testing.T) { + tests := []struct { + input string + expected string + }{ + { + input: "", + expected: "", + }, + { + input: "Some text (not a link)", + expected: "Some text (not a link)", + }, + { + input: "This is a link to [_](#section)", + expected: "This is a link to [section](https://docs.databricks.com/dev-tools/bundles/reference.html#section)", + }, + { + input: "This is a link to [_](/dev-tools/bundles/resources.html#dashboard)", + expected: "This is a link to [dashboard](https://docs.databricks.com/dev-tools/bundles/resources.html#dashboard)", + }, + { + input: "This is a link to [_](/dev-tools/bundles/resources.html)", + expected: "This is a link to [link](https://docs.databricks.com/dev-tools/bundles/resources.html)", + }, + { + input: "This is a link to [external](https://external.com)", + expected: "This is a link to [external](https://external.com)", + }, + } + + for 
_, test := range tests { + result := convertLinksToAbsoluteUrl(test.input) + if result != test.expected { + t.Errorf("For input '%s', expected '%s', but got '%s'", test.input, test.expected, result) + } + } +} diff --git a/bundle/internal/schema/main.go b/bundle/internal/schema/main.go index 881ce3496..77927a966 100644 --- a/bundle/internal/schema/main.go +++ b/bundle/internal/schema/main.go @@ -5,6 +5,7 @@ import ( "fmt" "log" "os" + "path/filepath" "reflect" "github.com/databricks/cli/bundle/config" @@ -43,18 +44,20 @@ func addInterpolationPatterns(typ reflect.Type, s jsonschema.Schema) jsonschema. case jsonschema.ArrayType, jsonschema.ObjectType: // arrays and objects can have complex variable values specified. return jsonschema.Schema{ - AnyOf: []jsonschema.Schema{ + // OneOf is used because we don't expect more than 1 match and schema-based auto-complete works better with OneOf + OneOf: []jsonschema.Schema{ s, { Type: jsonschema.StringType, Pattern: interpolationPattern("var"), - }}, + }, + }, } case jsonschema.IntegerType, jsonschema.NumberType, jsonschema.BooleanType: // primitives can have variable values, or references like ${bundle.xyz} // or ${workspace.xyz} return jsonschema.Schema{ - AnyOf: []jsonschema.Schema{ + OneOf: []jsonschema.Schema{ s, {Type: jsonschema.StringType, Pattern: interpolationPattern("resources")}, {Type: jsonschema.StringType, Pattern: interpolationPattern("bundle")}, @@ -112,44 +115,67 @@ func makeVolumeTypeOptional(typ reflect.Type, s jsonschema.Schema) jsonschema.Sc } func main() { - if len(os.Args) != 2 { - fmt.Println("Usage: go run main.go ") + if len(os.Args) != 3 { + fmt.Println("Usage: go run main.go ") os.Exit(1) } + // Directory with annotation files + workdir := os.Args[1] // Output file, where the generated JSON schema will be written to. - outputFile := os.Args[1] + outputFile := os.Args[2] + + generateSchema(workdir, outputFile) +} + +func generateSchema(workdir, outputFile string) { + annotationsPath := filepath.Join(workdir, "annotations.yml") + annotationsOpenApiPath := filepath.Join(workdir, "annotations_openapi.yml") + annotationsOpenApiOverridesPath := filepath.Join(workdir, "annotations_openapi_overrides.yml") // Input file, the databricks openapi spec. inputFile := os.Getenv("DATABRICKS_OPENAPI_SPEC") - if inputFile == "" { - log.Fatal("DATABRICKS_OPENAPI_SPEC environment variable not set") + if inputFile != "" { + p, err := newParser(inputFile) + if err != nil { + log.Fatal(err) + } + fmt.Printf("Writing OpenAPI annotations to %s\n", annotationsOpenApiPath) + err = p.extractAnnotations(reflect.TypeOf(config.Root{}), annotationsOpenApiPath, annotationsOpenApiOverridesPath) + if err != nil { + log.Fatal(err) + } } - p, err := newParser(inputFile) + a, err := newAnnotationHandler([]string{annotationsOpenApiPath, annotationsOpenApiOverridesPath, annotationsPath}) if err != nil { log.Fatal(err) } // Generate the JSON schema from the bundle Go struct. s, err := jsonschema.FromType(reflect.TypeOf(config.Root{}), []func(reflect.Type, jsonschema.Schema) jsonschema.Schema{ - p.addDescriptions, - p.addEnums, removeJobsFields, makeVolumeTypeOptional, + a.addAnnotations, addInterpolationPatterns, }) if err != nil { log.Fatal(err) } + // Overwrite the input annotation file, adding missing annotations + err = a.syncWithMissingAnnotations(annotationsPath) + if err != nil { + log.Fatal(err) + } + b, err := json.MarshalIndent(s, "", " ") if err != nil { log.Fatal(err) } // Write the schema descriptions to the output file. 
- err = os.WriteFile(outputFile, b, 0644)
+ err = os.WriteFile(outputFile, b, 0o644)
 if err != nil {
 log.Fatal(err)
 }
diff --git a/bundle/internal/schema/main_test.go b/bundle/internal/schema/main_test.go
new file mode 100644
index 000000000..4eeb41d47
--- /dev/null
+++ b/bundle/internal/schema/main_test.go
@@ -0,0 +1,126 @@
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/databricks/cli/bundle/config"
+ "github.com/databricks/cli/libs/dyn"
+ "github.com/databricks/cli/libs/dyn/merge"
+ "github.com/databricks/cli/libs/dyn/yamlloader"
+ "github.com/databricks/cli/libs/jsonschema"
+ "github.com/stretchr/testify/assert"
+ "gopkg.in/yaml.v3"
+)
+
+func copyFile(src, dst string) error {
+ in, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer in.Close()
+
+ out, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer out.Close()
+
+ _, err = io.Copy(out, in)
+ if err != nil {
+ return err
+ }
+
+ return out.Close()
+}
+
+// Checks whether descriptions are added for new config fields in the annotations.yml file.
+// If this test fails, either manually add descriptions to `annotations.yml` or do the following:
+// 1. for fields described outside of the CLI package, download the latest OpenAPI spec and set the DATABRICKS_OPENAPI_SPEC env variable to its path
+// 2. run `make schema` from the repository root to add placeholder descriptions
+// 3. replace all "PLACEHOLDER" values with the actual descriptions if possible
+// 4. run `make schema` again to regenerate the schema with actual descriptions
+func TestRequiredAnnotationsForNewFields(t *testing.T) {
+ workdir := t.TempDir()
+ annotationsPath := path.Join(workdir, "annotations.yml")
+ annotationsOpenApiPath := path.Join(workdir, "annotations_openapi.yml")
+ annotationsOpenApiOverridesPath := path.Join(workdir, "annotations_openapi_overrides.yml")
+
+ // Copy existing annotation files from the same folder as this test
+ err := copyFile("annotations.yml", annotationsPath)
+ assert.NoError(t, err)
+ err = copyFile("annotations_openapi.yml", annotationsOpenApiPath)
+ assert.NoError(t, err)
+ err = copyFile("annotations_openapi_overrides.yml", annotationsOpenApiOverridesPath)
+ assert.NoError(t, err)
+
+ generateSchema(workdir, path.Join(t.TempDir(), "schema.json"))
+
+ originalFile, err := os.ReadFile("annotations.yml")
+ assert.NoError(t, err)
+ currentFile, err := os.ReadFile(annotationsPath)
+ assert.NoError(t, err)
+ original, err := yamlloader.LoadYAML("", bytes.NewBuffer(originalFile))
+ assert.NoError(t, err)
+ current, err := yamlloader.LoadYAML("", bytes.NewBuffer(currentFile))
+ assert.NoError(t, err)
+
+ // Collect added paths.
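+ // merge.Override walks both YAML trees; VisitInsert fires for every path that
+ // exists in the regenerated annotations but not in the committed annotations.yml,
+ // i.e. a new config field that still lacks a description.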
+ var updatedFieldPaths []string + _, err = merge.Override(original, current, merge.OverrideVisitor{ + VisitInsert: func(basePath dyn.Path, right dyn.Value) (dyn.Value, error) { + updatedFieldPaths = append(updatedFieldPaths, basePath.String()) + return right, nil + }, + }) + assert.NoError(t, err) + assert.Empty(t, updatedFieldPaths, fmt.Sprintf("Missing JSON-schema descriptions for new config fields in bundle/internal/schema/annotations.yml:\n%s", strings.Join(updatedFieldPaths, "\n"))) +} + +// Checks whether types in annotation files are still present in Config type +func TestNoDetachedAnnotations(t *testing.T) { + files := []string{ + "annotations.yml", + "annotations_openapi.yml", + "annotations_openapi_overrides.yml", + } + + types := map[string]bool{} + for _, file := range files { + annotations, err := getAnnotations(file) + assert.NoError(t, err) + for k := range annotations { + types[k] = false + } + } + + _, err := jsonschema.FromType(reflect.TypeOf(config.Root{}), []func(reflect.Type, jsonschema.Schema) jsonschema.Schema{ + func(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema { + delete(types, getPath(typ)) + return s + }, + }) + assert.NoError(t, err) + + for typ := range types { + t.Errorf("Type `%s` in annotations file is not found in `root.Config` type", typ) + } + assert.Empty(t, types, "Detached annotations found, regenerate schema and check for package path changes") +} + +func getAnnotations(path string) (annotationFile, error) { + b, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + var data annotationFile + err = yaml.Unmarshal(b, &data) + return data, err +} diff --git a/bundle/internal/schema/parser.go b/bundle/internal/schema/parser.go index ef3d6e719..e1d1a13dc 100644 --- a/bundle/internal/schema/parser.go +++ b/bundle/internal/schema/parser.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/databricks/cli/libs/jsonschema" + "gopkg.in/yaml.v3" ) type Components struct { @@ -23,6 +24,8 @@ type openapiParser struct { ref map[string]jsonschema.Schema } +const RootTypeKey = "_" + func newParser(path string) (*openapiParser, error) { b, err := os.ReadFile(path) if err != nil { @@ -78,7 +81,11 @@ func (p *openapiParser) findRef(typ reflect.Type) (jsonschema.Schema, bool) { // Skip if the type is not in the openapi spec. _, ok := p.ref[k] if !ok { - continue + k = mapIncorrectTypNames(k) + _, ok = p.ref[k] + if !ok { + continue + } } // Return the first Go SDK type found in the openapi spec. @@ -88,36 +95,122 @@ func (p *openapiParser) findRef(typ reflect.Type) (jsonschema.Schema, bool) { return jsonschema.Schema{}, false } +// Fix inconsistent type names between the Go SDK and the OpenAPI spec. +// E.g. "serving.PaLmConfig" in the Go SDK is "serving.PaLMConfig" in the OpenAPI spec. +func mapIncorrectTypNames(ref string) string { + switch ref { + case "serving.PaLmConfig": + return "serving.PaLMConfig" + case "serving.OpenAiConfig": + return "serving.OpenAIConfig" + case "serving.GoogleCloudVertexAiConfig": + return "serving.GoogleCloudVertexAIConfig" + case "serving.Ai21LabsConfig": + return "serving.AI21LabsConfig" + default: + return ref + } +} + // Use the OpenAPI spec to load descriptions for the given type. 
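+// The extracted descriptions and enums are written out as annotation YAML files;
+// fields that have no description in the spec get an empty (PLACEHOLDER) entry
+// recorded in the overrides file.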
-func (p *openapiParser) addDescriptions(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema { - ref, ok := p.findRef(typ) - if !ok { - return s +func (p *openapiParser) extractAnnotations(typ reflect.Type, outputPath, overridesPath string) error { + annotations := annotationFile{} + overrides := annotationFile{} + + b, err := os.ReadFile(overridesPath) + if err != nil { + return err + } + err = yaml.Unmarshal(b, &overrides) + if err != nil { + return err + } + if overrides == nil { + overrides = annotationFile{} } - s.Description = ref.Description - for k, v := range s.Properties { - if refProp, ok := ref.Properties[k]; ok { - v.Description = refProp.Description - } + _, err = jsonschema.FromType(typ, []func(reflect.Type, jsonschema.Schema) jsonschema.Schema{ + func(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema { + ref, ok := p.findRef(typ) + if !ok { + return s + } + + basePath := getPath(typ) + pkg := map[string]annotation{} + annotations[basePath] = pkg + + if ref.Description != "" || ref.Enum != nil { + pkg[RootTypeKey] = annotation{Description: ref.Description, Enum: ref.Enum} + } + + for k := range s.Properties { + if refProp, ok := ref.Properties[k]; ok { + pkg[k] = annotation{Description: refProp.Description, Enum: refProp.Enum} + if refProp.Description == "" { + addEmptyOverride(k, basePath, overrides) + } + } else { + addEmptyOverride(k, basePath, overrides) + } + } + return s + }, + }) + if err != nil { + return err } - return s + err = saveYamlWithStyle(overridesPath, overrides) + if err != nil { + return err + } + err = saveYamlWithStyle(outputPath, annotations) + if err != nil { + return err + } + err = prependCommentToFile(outputPath, "# This file is auto-generated. DO NOT EDIT.\n") + if err != nil { + return err + } + return nil } -// Use the OpenAPI spec add enum values for the given type. -func (p *openapiParser) addEnums(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema { - ref, ok := p.findRef(typ) - if !ok { - return s +func prependCommentToFile(outputPath, comment string) error { + b, err := os.ReadFile(outputPath) + if err != nil { + return err } - - s.Enum = append(s.Enum, ref.Enum...) - for k, v := range s.Properties { - if refProp, ok := ref.Properties[k]; ok { - v.Enum = append(v.Enum, refProp.Enum...) 
- } + f, err := os.OpenFile(outputPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644) + if err != nil { + return err } + defer f.Close() - return s + _, err = f.WriteString(comment) + if err != nil { + return err + } + _, err = f.Write(b) + return err +} + +func addEmptyOverride(key, pkg string, overridesFile annotationFile) { + if overridesFile[pkg] == nil { + overridesFile[pkg] = map[string]annotation{} + } + + overrides := overridesFile[pkg] + if overrides[key].Description == "" { + overrides[key] = annotation{Description: Placeholder} + } + + a, ok := overrides[key] + if !ok { + a = annotation{} + } + if a.Description == "" { + a.Description = Placeholder + } + overrides[key] = a } diff --git a/bundle/internal/schema/testdata/pass/target_variable.yml b/bundle/internal/schema/testdata/pass/target_variable.yml new file mode 100644 index 000000000..34af94658 --- /dev/null +++ b/bundle/internal/schema/testdata/pass/target_variable.yml @@ -0,0 +1,5 @@ +targets: + production: + variables: + myvar: + default: true diff --git a/bundle/internal/tf/codegen/generator/walker.go b/bundle/internal/tf/codegen/generator/walker.go index 2ed044c3d..0e9d73c4e 100644 --- a/bundle/internal/tf/codegen/generator/walker.go +++ b/bundle/internal/tf/codegen/generator/walker.go @@ -2,9 +2,8 @@ package generator import ( "fmt" - "strings" - "slices" + "strings" tfjson "github.com/hashicorp/terraform-json" "github.com/iancoleman/strcase" @@ -70,6 +69,25 @@ func nestedBlockKeys(block *tfjson.SchemaBlock) []string { return keys } +func nestedField(name []string, k string, isRef bool) field { + // Collect field properties. + fieldName := strcase.ToCamel(k) + fieldTypePrefix := "" + if isRef { + fieldTypePrefix = "*" + } else { + fieldTypePrefix = "[]" + } + fieldType := fmt.Sprintf("%s%s", fieldTypePrefix, strings.Join(append(name, strcase.ToCamel(k)), "")) + fieldTag := fmt.Sprintf("%s,omitempty", k) + + return field{ + Name: fieldName, + Type: fieldType, + Tag: fieldTag, + } +} + func (w *walker) walk(block *tfjson.SchemaBlock, name []string) error { // Produce nested types before this block itself. // This ensures types are defined before they are referenced. @@ -91,10 +109,24 @@ func (w *walker) walk(block *tfjson.SchemaBlock, name []string) error { v := block.Attributes[k] // Assert the attribute type is always set. - if v.AttributeType == cty.NilType { + if v.AttributeType == cty.NilType && v.AttributeNestedType == nil { return fmt.Errorf("unexpected nil type for attribute %s", k) } + // If there is a nested type, walk it and continue to next attribute. + if v.AttributeNestedType != nil { + nestedBlock := &tfjson.SchemaBlock{ + Attributes: v.AttributeNestedType.Attributes, + } + err := w.walk(nestedBlock, append(name, strcase.ToCamel(k))) + if err != nil { + return err + } + // Append to list of fields for type. + typ.Fields = append(typ.Fields, nestedField(name, k, v.AttributeNestedType.NestingMode == tfjson.SchemaNestingModeSingle)) + continue + } + // Collect field properties. fieldName := strcase.ToCamel(k) fieldType := processAttributeType(v.AttributeType) @@ -117,24 +149,8 @@ func (w *walker) walk(block *tfjson.SchemaBlock, name []string) error { // Declare nested blocks. for _, k := range nestedBlockKeys(block) { v := block.NestedBlocks[k] - - // Collect field properties. 
- fieldName := strcase.ToCamel(k) - fieldTypePrefix := "" - if v.MaxItems == 1 { - fieldTypePrefix = "*" - } else { - fieldTypePrefix = "[]" - } - fieldType := fmt.Sprintf("%s%s", fieldTypePrefix, strings.Join(append(name, strcase.ToCamel(k)), "")) - fieldTag := fmt.Sprintf("%s,omitempty", k) - // Append to list of fields for type. - typ.Fields = append(typ.Fields, field{ - Name: fieldName, - Type: fieldType, - Tag: fieldTag, - }) + typ.Fields = append(typ.Fields, nestedField(name, k, v.MaxItems == 1)) } // Append type to list of structs. diff --git a/bundle/internal/tf/codegen/go.mod b/bundle/internal/tf/codegen/go.mod index 67ac4bbc7..e9fc83615 100644 --- a/bundle/internal/tf/codegen/go.mod +++ b/bundle/internal/tf/codegen/go.mod @@ -1,24 +1,27 @@ module github.com/databricks/cli/bundle/internal/tf/codegen -go 1.21 +go 1.23 + +toolchain go1.23.4 require ( - github.com/hashicorp/go-version v1.6.0 - github.com/hashicorp/hc-install v0.6.3 - github.com/hashicorp/terraform-exec v0.20.0 - github.com/hashicorp/terraform-json v0.21.0 + github.com/hashicorp/go-version v1.7.0 + github.com/hashicorp/hc-install v0.9.0 + github.com/hashicorp/terraform-exec v0.21.0 + github.com/hashicorp/terraform-json v0.23.0 github.com/iancoleman/strcase v0.3.0 - github.com/zclconf/go-cty v1.14.2 - golang.org/x/exp v0.0.0-20240213143201-ec583247a57a + github.com/zclconf/go-cty v1.15.1 + golang.org/x/exp v0.0.0-20241204233417-43b7b7cde48d ) require ( - github.com/ProtonMail/go-crypto v1.1.0-alpha.0 // indirect + github.com/ProtonMail/go-crypto v1.1.3 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect - github.com/cloudflare/circl v1.3.7 // indirect + github.com/cloudflare/circl v1.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - golang.org/x/crypto v0.19.0 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/text v0.14.0 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect + golang.org/x/crypto v0.31.0 // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect ) diff --git a/bundle/internal/tf/codegen/go.sum b/bundle/internal/tf/codegen/go.sum index 7a4023ba5..1ce56777f 100644 --- a/bundle/internal/tf/codegen/go.sum +++ b/bundle/internal/tf/codegen/go.sum @@ -2,67 +2,79 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/ProtonMail/go-crypto v1.1.0-alpha.0 h1:nHGfwXmFvJrSR9xu8qL7BkO4DqTHXE9N5vPhgY2I+j0= -github.com/ProtonMail/go-crypto v1.1.0-alpha.0/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/ProtonMail/go-crypto v1.1.3 h1:nRBOetoydLeUb4nHajyO2bKqMLfWQ/ZPwkXqXxPxCFk= +github.com/ProtonMail/go-crypto v1.1.3/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= -github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= -github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= +github.com/cloudflare/circl v1.5.0 h1:hxIWksrX6XN5a1L2TI/h53AGPhNHoUBo+TD1ms9+pys= 
+github.com/cloudflare/circl v1.5.0/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= -github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= -github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= +github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= +github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.6.3 h1:yE/r1yJvWbtrJ0STwScgEnCanb0U9v7zp0Gbkmcoxqs= -github.com/hashicorp/hc-install v0.6.3/go.mod h1:KamGdbodYzlufbWh4r9NRo8y6GLHWZP2GBtdnms1Ln0= -github.com/hashicorp/terraform-exec v0.20.0 h1:DIZnPsqzPGuUnq6cH8jWcPunBfY+C+M8JyYF3vpnuEo= -github.com/hashicorp/terraform-exec v0.20.0/go.mod h1:ckKGkJWbsNqFKV1itgMnE0hY9IYf1HoiekpuN0eWoDw= -github.com/hashicorp/terraform-json v0.21.0 h1:9NQxbLNqPbEMze+S6+YluEdXgJmhQykRyRNd+zTI05U= -github.com/hashicorp/terraform-json v0.21.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hc-install v0.9.0 h1:2dIk8LcvANwtv3QZLckxcjyF5w8KVtiMxu6G6eLhghE= 
+github.com/hashicorp/hc-install v0.9.0/go.mod h1:+6vOP+mf3tuGgMApVYtmsnDoKWMDcFXeTxCACYZ8SFg= +github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ= +github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg= +github.com/hashicorp/terraform-json v0.23.0 h1:sniCkExU4iKtTADReHzACkk8fnpQXrdD2xoR+lppBkI= +github.com/hashicorp/terraform-json v0.23.0/go.mod h1:MHdXbBAbSg0GvzuWazEGKAn/cyNfIB7mN6y7KJN6y2c= github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= -github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= -github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= +github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= -github.com/zclconf/go-cty v1.14.2 h1:kTG7lqmBou0Zkx35r6HJHUQTvaRPr5bIAf3AoHS0izI= -github.com/zclconf/go-cty v1.14.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= -golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.14.0 
h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +github.com/zclconf/go-cty v1.15.1 h1:RgQYm4j2EvoBRXOPxhUvxPzRrGDo1eCOhHXuGfrj5S0= +github.com/zclconf/go-cty v1.15.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/exp v0.0.0-20241204233417-43b7b7cde48d h1:0olWaB5pg3+oychR51GUVCEsGkeCU/2JxjBgIo4f3M0= +golang.org/x/exp v0.0.0-20241204233417-43b7b7cde48d/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index a778e0232..27c4b16cd 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.59.0" +const ProviderVersion = "1.62.0" diff --git a/bundle/internal/tf/codegen/templates/root.go.tmpl b/bundle/internal/tf/codegen/templates/root.go.tmpl index e03e978f0..b5c53c161 100644 --- a/bundle/internal/tf/codegen/templates/root.go.tmpl +++ b/bundle/internal/tf/codegen/templates/root.go.tmpl @@ -25,9 +25,9 @@ const ProviderVersion = "{{ .ProviderVersion }}" func NewRoot() *Root { return &Root{ - Terraform: map[string]interface{}{ - "required_providers": map[string]interface{}{ - "databricks": map[string]interface{}{ + Terraform: map[string]any{ + "required_providers": map[string]any{ + "databricks": map[string]any{ "source": ProviderSource, "version": ProviderVersion, }, diff --git a/bundle/internal/tf/schema/data_source_app.go b/bundle/internal/tf/schema/data_source_app.go new file mode 100644 index 000000000..9b4ef077e --- /dev/null +++ b/bundle/internal/tf/schema/data_source_app.go @@ -0,0 +1,107 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type DataSourceAppAppActiveDeploymentDeploymentArtifacts struct { + SourceCodePath string `json:"source_code_path,omitempty"` +} + +type DataSourceAppAppActiveDeploymentStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppAppActiveDeployment struct { + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DeploymentArtifacts *DataSourceAppAppActiveDeploymentDeploymentArtifacts `json:"deployment_artifacts,omitempty"` + DeploymentId string `json:"deployment_id,omitempty"` + Mode string `json:"mode,omitempty"` + SourceCodePath string `json:"source_code_path,omitempty"` + Status *DataSourceAppAppActiveDeploymentStatus `json:"status,omitempty"` + UpdateTime string `json:"update_time,omitempty"` +} + +type DataSourceAppAppAppStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppAppComputeStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppAppPendingDeploymentDeploymentArtifacts struct { + SourceCodePath string `json:"source_code_path,omitempty"` +} + +type DataSourceAppAppPendingDeploymentStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppAppPendingDeployment struct { + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DeploymentArtifacts *DataSourceAppAppPendingDeploymentDeploymentArtifacts `json:"deployment_artifacts,omitempty"` + DeploymentId string `json:"deployment_id,omitempty"` + Mode string `json:"mode,omitempty"` + SourceCodePath string `json:"source_code_path,omitempty"` + Status *DataSourceAppAppPendingDeploymentStatus `json:"status,omitempty"` + UpdateTime string `json:"update_time,omitempty"` +} + +type DataSourceAppAppResourcesJob struct { + Id string `json:"id"` + Permission string `json:"permission"` +} + +type DataSourceAppAppResourcesSecret struct { + Key string `json:"key"` + Permission string `json:"permission"` + Scope string `json:"scope"` +} + +type DataSourceAppAppResourcesServingEndpoint struct { + Name string `json:"name"` + Permission string `json:"permission"` +} + +type DataSourceAppAppResourcesSqlWarehouse struct { + Id string `json:"id"` + Permission string `json:"permission"` +} + +type DataSourceAppAppResources struct { + Description string `json:"description,omitempty"` + Job *DataSourceAppAppResourcesJob `json:"job,omitempty"` + Name string `json:"name"` + Secret *DataSourceAppAppResourcesSecret `json:"secret,omitempty"` + ServingEndpoint *DataSourceAppAppResourcesServingEndpoint `json:"serving_endpoint,omitempty"` + SqlWarehouse *DataSourceAppAppResourcesSqlWarehouse `json:"sql_warehouse,omitempty"` +} + +type DataSourceAppApp struct { + ActiveDeployment *DataSourceAppAppActiveDeployment `json:"active_deployment,omitempty"` + AppStatus *DataSourceAppAppAppStatus `json:"app_status,omitempty"` + ComputeStatus *DataSourceAppAppComputeStatus `json:"compute_status,omitempty"` + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DefaultSourceCodePath string `json:"default_source_code_path,omitempty"` + Description string `json:"description,omitempty"` + Name string `json:"name"` + PendingDeployment *DataSourceAppAppPendingDeployment `json:"pending_deployment,omitempty"` + Resources []DataSourceAppAppResources `json:"resources,omitempty"` 
+ ServicePrincipalClientId string `json:"service_principal_client_id,omitempty"` + ServicePrincipalId int `json:"service_principal_id,omitempty"` + ServicePrincipalName string `json:"service_principal_name,omitempty"` + UpdateTime string `json:"update_time,omitempty"` + Updater string `json:"updater,omitempty"` + Url string `json:"url,omitempty"` +} + +type DataSourceApp struct { + App *DataSourceAppApp `json:"app,omitempty"` + Name string `json:"name"` +} diff --git a/bundle/internal/tf/schema/data_source_apps.go b/bundle/internal/tf/schema/data_source_apps.go new file mode 100644 index 000000000..dd381eabf --- /dev/null +++ b/bundle/internal/tf/schema/data_source_apps.go @@ -0,0 +1,106 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type DataSourceAppsAppActiveDeploymentDeploymentArtifacts struct { + SourceCodePath string `json:"source_code_path,omitempty"` +} + +type DataSourceAppsAppActiveDeploymentStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppsAppActiveDeployment struct { + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DeploymentArtifacts *DataSourceAppsAppActiveDeploymentDeploymentArtifacts `json:"deployment_artifacts,omitempty"` + DeploymentId string `json:"deployment_id,omitempty"` + Mode string `json:"mode,omitempty"` + SourceCodePath string `json:"source_code_path,omitempty"` + Status *DataSourceAppsAppActiveDeploymentStatus `json:"status,omitempty"` + UpdateTime string `json:"update_time,omitempty"` +} + +type DataSourceAppsAppAppStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppsAppComputeStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppsAppPendingDeploymentDeploymentArtifacts struct { + SourceCodePath string `json:"source_code_path,omitempty"` +} + +type DataSourceAppsAppPendingDeploymentStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppsAppPendingDeployment struct { + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DeploymentArtifacts *DataSourceAppsAppPendingDeploymentDeploymentArtifacts `json:"deployment_artifacts,omitempty"` + DeploymentId string `json:"deployment_id,omitempty"` + Mode string `json:"mode,omitempty"` + SourceCodePath string `json:"source_code_path,omitempty"` + Status *DataSourceAppsAppPendingDeploymentStatus `json:"status,omitempty"` + UpdateTime string `json:"update_time,omitempty"` +} + +type DataSourceAppsAppResourcesJob struct { + Id string `json:"id"` + Permission string `json:"permission"` +} + +type DataSourceAppsAppResourcesSecret struct { + Key string `json:"key"` + Permission string `json:"permission"` + Scope string `json:"scope"` +} + +type DataSourceAppsAppResourcesServingEndpoint struct { + Name string `json:"name"` + Permission string `json:"permission"` +} + +type DataSourceAppsAppResourcesSqlWarehouse struct { + Id string `json:"id"` + Permission string `json:"permission"` +} + +type DataSourceAppsAppResources struct { + Description string `json:"description,omitempty"` + Job *DataSourceAppsAppResourcesJob `json:"job,omitempty"` + Name string `json:"name"` + Secret *DataSourceAppsAppResourcesSecret `json:"secret,omitempty"` + ServingEndpoint *DataSourceAppsAppResourcesServingEndpoint 
`json:"serving_endpoint,omitempty"` + SqlWarehouse *DataSourceAppsAppResourcesSqlWarehouse `json:"sql_warehouse,omitempty"` +} + +type DataSourceAppsApp struct { + ActiveDeployment *DataSourceAppsAppActiveDeployment `json:"active_deployment,omitempty"` + AppStatus *DataSourceAppsAppAppStatus `json:"app_status,omitempty"` + ComputeStatus *DataSourceAppsAppComputeStatus `json:"compute_status,omitempty"` + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DefaultSourceCodePath string `json:"default_source_code_path,omitempty"` + Description string `json:"description,omitempty"` + Name string `json:"name"` + PendingDeployment *DataSourceAppsAppPendingDeployment `json:"pending_deployment,omitempty"` + Resources []DataSourceAppsAppResources `json:"resources,omitempty"` + ServicePrincipalClientId string `json:"service_principal_client_id,omitempty"` + ServicePrincipalId int `json:"service_principal_id,omitempty"` + ServicePrincipalName string `json:"service_principal_name,omitempty"` + UpdateTime string `json:"update_time,omitempty"` + Updater string `json:"updater,omitempty"` + Url string `json:"url,omitempty"` +} + +type DataSourceApps struct { + App []DataSourceAppsApp `json:"app,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_cluster.go b/bundle/internal/tf/schema/data_source_cluster.go index 94d67bbfa..38cb534f2 100644 --- a/bundle/internal/tf/schema/data_source_cluster.go +++ b/bundle/internal/tf/schema/data_source_cluster.go @@ -317,6 +317,8 @@ type DataSourceClusterClusterInfoSpec struct { EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` IdempotencyToken string `json:"idempotency_token,omitempty"` InstancePoolId string `json:"instance_pool_id,omitempty"` + IsSingleNode bool `json:"is_single_node,omitempty"` + Kind string `json:"kind,omitempty"` NodeTypeId string `json:"node_type_id,omitempty"` NumWorkers int `json:"num_workers,omitempty"` PolicyId string `json:"policy_id,omitempty"` @@ -326,6 +328,7 @@ type DataSourceClusterClusterInfoSpec struct { SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` SparkVersion string `json:"spark_version"` SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` Autoscale *DataSourceClusterClusterInfoSpecAutoscale `json:"autoscale,omitempty"` AwsAttributes *DataSourceClusterClusterInfoSpecAwsAttributes `json:"aws_attributes,omitempty"` AzureAttributes *DataSourceClusterClusterInfoSpecAzureAttributes `json:"azure_attributes,omitempty"` @@ -369,7 +372,9 @@ type DataSourceClusterClusterInfo struct { EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` InstancePoolId string `json:"instance_pool_id,omitempty"` + IsSingleNode bool `json:"is_single_node,omitempty"` JdbcPort int `json:"jdbc_port,omitempty"` + Kind string `json:"kind,omitempty"` LastRestartedTime int `json:"last_restarted_time,omitempty"` LastStateLossTime int `json:"last_state_loss_time,omitempty"` NodeTypeId string `json:"node_type_id,omitempty"` @@ -386,6 +391,7 @@ type DataSourceClusterClusterInfo struct { State string `json:"state,omitempty"` StateMessage string `json:"state_message,omitempty"` TerminatedTime int `json:"terminated_time,omitempty"` + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` Autoscale *DataSourceClusterClusterInfoAutoscale `json:"autoscale,omitempty"` AwsAttributes *DataSourceClusterClusterInfoAwsAttributes 
`json:"aws_attributes,omitempty"` AzureAttributes *DataSourceClusterClusterInfoAzureAttributes `json:"azure_attributes,omitempty"` diff --git a/bundle/internal/tf/schema/data_source_functions.go b/bundle/internal/tf/schema/data_source_functions.go index 6085d7522..416db8fc8 100644 --- a/bundle/internal/tf/schema/data_source_functions.go +++ b/bundle/internal/tf/schema/data_source_functions.go @@ -69,6 +69,7 @@ type DataSourceFunctionsFunctions struct { FullDataType string `json:"full_data_type,omitempty"` FullName string `json:"full_name,omitempty"` FunctionId string `json:"function_id,omitempty"` + InputParams []DataSourceFunctionsFunctionsInputParams `json:"input_params,omitempty"` IsDeterministic bool `json:"is_deterministic,omitempty"` IsNullCall bool `json:"is_null_call,omitempty"` MetastoreId string `json:"metastore_id,omitempty"` @@ -76,8 +77,10 @@ type DataSourceFunctionsFunctions struct { Owner string `json:"owner,omitempty"` ParameterStyle string `json:"parameter_style,omitempty"` Properties string `json:"properties,omitempty"` + ReturnParams []DataSourceFunctionsFunctionsReturnParams `json:"return_params,omitempty"` RoutineBody string `json:"routine_body,omitempty"` RoutineDefinition string `json:"routine_definition,omitempty"` + RoutineDependencies []DataSourceFunctionsFunctionsRoutineDependencies `json:"routine_dependencies,omitempty"` SchemaName string `json:"schema_name,omitempty"` SecurityType string `json:"security_type,omitempty"` SpecificName string `json:"specific_name,omitempty"` @@ -85,14 +88,11 @@ type DataSourceFunctionsFunctions struct { SqlPath string `json:"sql_path,omitempty"` UpdatedAt int `json:"updated_at,omitempty"` UpdatedBy string `json:"updated_by,omitempty"` - InputParams []DataSourceFunctionsFunctionsInputParams `json:"input_params,omitempty"` - ReturnParams []DataSourceFunctionsFunctionsReturnParams `json:"return_params,omitempty"` - RoutineDependencies []DataSourceFunctionsFunctionsRoutineDependencies `json:"routine_dependencies,omitempty"` } type DataSourceFunctions struct { CatalogName string `json:"catalog_name"` + Functions []DataSourceFunctionsFunctions `json:"functions,omitempty"` IncludeBrowse bool `json:"include_browse,omitempty"` SchemaName string `json:"schema_name"` - Functions []DataSourceFunctionsFunctions `json:"functions,omitempty"` } diff --git a/bundle/internal/tf/schema/data_source_jobs.go b/bundle/internal/tf/schema/data_source_jobs.go index 98533c0c8..643f7a9f9 100644 --- a/bundle/internal/tf/schema/data_source_jobs.go +++ b/bundle/internal/tf/schema/data_source_jobs.go @@ -3,6 +3,7 @@ package schema type DataSourceJobs struct { - Id string `json:"id,omitempty"` - Ids map[string]string `json:"ids,omitempty"` + Id string `json:"id,omitempty"` + Ids map[string]string `json:"ids,omitempty"` + JobNameContains string `json:"job_name_contains,omitempty"` } diff --git a/bundle/internal/tf/schema/data_source_notification_destinations.go b/bundle/internal/tf/schema/data_source_notification_destinations.go index c95ad6db9..8447b497b 100644 --- a/bundle/internal/tf/schema/data_source_notification_destinations.go +++ b/bundle/internal/tf/schema/data_source_notification_destinations.go @@ -10,6 +10,6 @@ type DataSourceNotificationDestinationsNotificationDestinations struct { type DataSourceNotificationDestinations struct { DisplayNameContains string `json:"display_name_contains,omitempty"` - Type string `json:"type,omitempty"` NotificationDestinations []DataSourceNotificationDestinationsNotificationDestinations 
`json:"notification_destinations,omitempty"` + Type string `json:"type,omitempty"` } diff --git a/bundle/internal/tf/schema/data_source_registered_model.go b/bundle/internal/tf/schema/data_source_registered_model.go index e19e0849a..41d69ff8f 100644 --- a/bundle/internal/tf/schema/data_source_registered_model.go +++ b/bundle/internal/tf/schema/data_source_registered_model.go @@ -8,6 +8,7 @@ type DataSourceRegisteredModelModelInfoAliases struct { } type DataSourceRegisteredModelModelInfo struct { + Aliases []DataSourceRegisteredModelModelInfoAliases `json:"aliases,omitempty"` BrowseOnly bool `json:"browse_only,omitempty"` CatalogName string `json:"catalog_name,omitempty"` Comment string `json:"comment,omitempty"` @@ -21,7 +22,6 @@ type DataSourceRegisteredModelModelInfo struct { StorageLocation string `json:"storage_location,omitempty"` UpdatedAt int `json:"updated_at,omitempty"` UpdatedBy string `json:"updated_by,omitempty"` - Aliases []DataSourceRegisteredModelModelInfoAliases `json:"aliases,omitempty"` } type DataSourceRegisteredModel struct { diff --git a/bundle/internal/tf/schema/data_source_registered_model_versions.go b/bundle/internal/tf/schema/data_source_registered_model_versions.go index f70e58f85..1a670dfbc 100644 --- a/bundle/internal/tf/schema/data_source_registered_model_versions.go +++ b/bundle/internal/tf/schema/data_source_registered_model_versions.go @@ -25,6 +25,7 @@ type DataSourceRegisteredModelVersionsModelVersionsModelVersionDependencies stru } type DataSourceRegisteredModelVersionsModelVersions struct { + Aliases []DataSourceRegisteredModelVersionsModelVersionsAliases `json:"aliases,omitempty"` BrowseOnly bool `json:"browse_only,omitempty"` CatalogName string `json:"catalog_name,omitempty"` Comment string `json:"comment,omitempty"` @@ -33,6 +34,7 @@ type DataSourceRegisteredModelVersionsModelVersions struct { Id string `json:"id,omitempty"` MetastoreId string `json:"metastore_id,omitempty"` ModelName string `json:"model_name,omitempty"` + ModelVersionDependencies []DataSourceRegisteredModelVersionsModelVersionsModelVersionDependencies `json:"model_version_dependencies,omitempty"` RunId string `json:"run_id,omitempty"` RunWorkspaceId int `json:"run_workspace_id,omitempty"` SchemaName string `json:"schema_name,omitempty"` @@ -42,8 +44,6 @@ type DataSourceRegisteredModelVersionsModelVersions struct { UpdatedAt int `json:"updated_at,omitempty"` UpdatedBy string `json:"updated_by,omitempty"` Version int `json:"version,omitempty"` - Aliases []DataSourceRegisteredModelVersionsModelVersionsAliases `json:"aliases,omitempty"` - ModelVersionDependencies []DataSourceRegisteredModelVersionsModelVersionsModelVersionDependencies `json:"model_version_dependencies,omitempty"` } type DataSourceRegisteredModelVersions struct { diff --git a/bundle/internal/tf/schema/data_source_serving_endpoints.go b/bundle/internal/tf/schema/data_source_serving_endpoints.go index 028121b5a..bdfd778e0 100644 --- a/bundle/internal/tf/schema/data_source_serving_endpoints.go +++ b/bundle/internal/tf/schema/data_source_serving_endpoints.go @@ -8,9 +8,9 @@ type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsInputPii struct { type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsInput struct { InvalidKeywords []string `json:"invalid_keywords,omitempty"` + Pii []DataSourceServingEndpointsEndpointsAiGatewayGuardrailsInputPii `json:"pii,omitempty"` Safety bool `json:"safety,omitempty"` ValidTopics []string `json:"valid_topics,omitempty"` - Pii 
[]DataSourceServingEndpointsEndpointsAiGatewayGuardrailsInputPii `json:"pii,omitempty"` } type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsOutputPii struct { @@ -19,9 +19,9 @@ type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsOutputPii struct { type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsOutput struct { InvalidKeywords []string `json:"invalid_keywords,omitempty"` + Pii []DataSourceServingEndpointsEndpointsAiGatewayGuardrailsOutputPii `json:"pii,omitempty"` Safety bool `json:"safety,omitempty"` ValidTopics []string `json:"valid_topics,omitempty"` - Pii []DataSourceServingEndpointsEndpointsAiGatewayGuardrailsOutputPii `json:"pii,omitempty"` } type DataSourceServingEndpointsEndpointsAiGatewayGuardrails struct { @@ -111,17 +111,17 @@ type DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelPalmCon } type DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModel struct { - Name string `json:"name"` - Provider string `json:"provider"` - Task string `json:"task"` Ai21LabsConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelAi21LabsConfig `json:"ai21labs_config,omitempty"` AmazonBedrockConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelAmazonBedrockConfig `json:"amazon_bedrock_config,omitempty"` AnthropicConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelAnthropicConfig `json:"anthropic_config,omitempty"` CohereConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelCohereConfig `json:"cohere_config,omitempty"` DatabricksModelServingConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelDatabricksModelServingConfig `json:"databricks_model_serving_config,omitempty"` GoogleCloudVertexAiConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelGoogleCloudVertexAiConfig `json:"google_cloud_vertex_ai_config,omitempty"` + Name string `json:"name"` OpenaiConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelOpenaiConfig `json:"openai_config,omitempty"` PalmConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelPalmConfig `json:"palm_config,omitempty"` + Provider string `json:"provider"` + Task string `json:"task"` } type DataSourceServingEndpointsEndpointsConfigServedEntitiesFoundationModel struct { @@ -134,9 +134,9 @@ type DataSourceServingEndpointsEndpointsConfigServedEntitiesFoundationModel stru type DataSourceServingEndpointsEndpointsConfigServedEntities struct { EntityName string `json:"entity_name,omitempty"` EntityVersion string `json:"entity_version,omitempty"` - Name string `json:"name,omitempty"` ExternalModel []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModel `json:"external_model,omitempty"` FoundationModel []DataSourceServingEndpointsEndpointsConfigServedEntitiesFoundationModel `json:"foundation_model,omitempty"` + Name string `json:"name,omitempty"` } type DataSourceServingEndpointsEndpointsConfigServedModels struct { @@ -161,16 +161,16 @@ type DataSourceServingEndpointsEndpointsTags struct { } type DataSourceServingEndpointsEndpoints struct { + AiGateway []DataSourceServingEndpointsEndpointsAiGateway `json:"ai_gateway,omitempty"` + Config []DataSourceServingEndpointsEndpointsConfig `json:"config,omitempty"` CreationTimestamp int `json:"creation_timestamp,omitempty"` Creator string `json:"creator,omitempty"` Id string `json:"id,omitempty"` LastUpdatedTimestamp int `json:"last_updated_timestamp,omitempty"` Name string 
`json:"name,omitempty"` - Task string `json:"task,omitempty"` - AiGateway []DataSourceServingEndpointsEndpointsAiGateway `json:"ai_gateway,omitempty"` - Config []DataSourceServingEndpointsEndpointsConfig `json:"config,omitempty"` State []DataSourceServingEndpointsEndpointsState `json:"state,omitempty"` Tags []DataSourceServingEndpointsEndpointsTags `json:"tags,omitempty"` + Task string `json:"task,omitempty"` } type DataSourceServingEndpoints struct { diff --git a/bundle/internal/tf/schema/data_sources.go b/bundle/internal/tf/schema/data_sources.go index 3a59bf8c3..1880db25a 100644 --- a/bundle/internal/tf/schema/data_sources.go +++ b/bundle/internal/tf/schema/data_sources.go @@ -3,6 +3,8 @@ package schema type DataSources struct { + App map[string]any `json:"databricks_app,omitempty"` + Apps map[string]any `json:"databricks_apps,omitempty"` AwsAssumeRolePolicy map[string]any `json:"databricks_aws_assume_role_policy,omitempty"` AwsBucketPolicy map[string]any `json:"databricks_aws_bucket_policy,omitempty"` AwsCrossaccountPolicy map[string]any `json:"databricks_aws_crossaccount_policy,omitempty"` @@ -66,6 +68,8 @@ type DataSources struct { func NewDataSources() *DataSources { return &DataSources{ + App: make(map[string]any), + Apps: make(map[string]any), AwsAssumeRolePolicy: make(map[string]any), AwsBucketPolicy: make(map[string]any), AwsCrossaccountPolicy: make(map[string]any), diff --git a/bundle/internal/tf/schema/resource_app.go b/bundle/internal/tf/schema/resource_app.go new file mode 100644 index 000000000..14c93b793 --- /dev/null +++ b/bundle/internal/tf/schema/resource_app.go @@ -0,0 +1,102 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceAppActiveDeploymentDeploymentArtifacts struct { + SourceCodePath string `json:"source_code_path,omitempty"` +} + +type ResourceAppActiveDeploymentStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type ResourceAppActiveDeployment struct { + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DeploymentArtifacts *ResourceAppActiveDeploymentDeploymentArtifacts `json:"deployment_artifacts,omitempty"` + DeploymentId string `json:"deployment_id,omitempty"` + Mode string `json:"mode,omitempty"` + SourceCodePath string `json:"source_code_path,omitempty"` + Status *ResourceAppActiveDeploymentStatus `json:"status,omitempty"` + UpdateTime string `json:"update_time,omitempty"` +} + +type ResourceAppAppStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type ResourceAppComputeStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type ResourceAppPendingDeploymentDeploymentArtifacts struct { + SourceCodePath string `json:"source_code_path,omitempty"` +} + +type ResourceAppPendingDeploymentStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type ResourceAppPendingDeployment struct { + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DeploymentArtifacts *ResourceAppPendingDeploymentDeploymentArtifacts `json:"deployment_artifacts,omitempty"` + DeploymentId string `json:"deployment_id,omitempty"` + Mode string `json:"mode,omitempty"` + SourceCodePath string `json:"source_code_path,omitempty"` + Status *ResourceAppPendingDeploymentStatus `json:"status,omitempty"` + UpdateTime string `json:"update_time,omitempty"` 
+} + +type ResourceAppResourcesJob struct { + Id string `json:"id"` + Permission string `json:"permission"` +} + +type ResourceAppResourcesSecret struct { + Key string `json:"key"` + Permission string `json:"permission"` + Scope string `json:"scope"` +} + +type ResourceAppResourcesServingEndpoint struct { + Name string `json:"name"` + Permission string `json:"permission"` +} + +type ResourceAppResourcesSqlWarehouse struct { + Id string `json:"id"` + Permission string `json:"permission"` +} + +type ResourceAppResources struct { + Description string `json:"description,omitempty"` + Job *ResourceAppResourcesJob `json:"job,omitempty"` + Name string `json:"name"` + Secret *ResourceAppResourcesSecret `json:"secret,omitempty"` + ServingEndpoint *ResourceAppResourcesServingEndpoint `json:"serving_endpoint,omitempty"` + SqlWarehouse *ResourceAppResourcesSqlWarehouse `json:"sql_warehouse,omitempty"` +} + +type ResourceApp struct { + ActiveDeployment *ResourceAppActiveDeployment `json:"active_deployment,omitempty"` + AppStatus *ResourceAppAppStatus `json:"app_status,omitempty"` + ComputeStatus *ResourceAppComputeStatus `json:"compute_status,omitempty"` + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DefaultSourceCodePath string `json:"default_source_code_path,omitempty"` + Description string `json:"description,omitempty"` + Name string `json:"name"` + PendingDeployment *ResourceAppPendingDeployment `json:"pending_deployment,omitempty"` + Resources []ResourceAppResources `json:"resources,omitempty"` + ServicePrincipalClientId string `json:"service_principal_client_id,omitempty"` + ServicePrincipalId int `json:"service_principal_id,omitempty"` + ServicePrincipalName string `json:"service_principal_name,omitempty"` + UpdateTime string `json:"update_time,omitempty"` + Updater string `json:"updater,omitempty"` + Url string `json:"url,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_azure_adls_gen2_mount.go b/bundle/internal/tf/schema/resource_azure_adls_gen2_mount.go index d0f96d54e..6e2ea08e8 100644 --- a/bundle/internal/tf/schema/resource_azure_adls_gen2_mount.go +++ b/bundle/internal/tf/schema/resource_azure_adls_gen2_mount.go @@ -9,6 +9,7 @@ type ResourceAzureAdlsGen2Mount struct { ClusterId string `json:"cluster_id,omitempty"` ContainerName string `json:"container_name"` Directory string `json:"directory,omitempty"` + Environment string `json:"environment,omitempty"` Id string `json:"id,omitempty"` InitializeFileSystem bool `json:"initialize_file_system"` MountName string `json:"mount_name"` diff --git a/bundle/internal/tf/schema/resource_cluster.go b/bundle/internal/tf/schema/resource_cluster.go index 4ae063c89..50395add9 100644 --- a/bundle/internal/tf/schema/resource_cluster.go +++ b/bundle/internal/tf/schema/resource_cluster.go @@ -176,6 +176,8 @@ type ResourceCluster struct { IdempotencyToken string `json:"idempotency_token,omitempty"` InstancePoolId string `json:"instance_pool_id,omitempty"` IsPinned bool `json:"is_pinned,omitempty"` + IsSingleNode bool `json:"is_single_node,omitempty"` + Kind string `json:"kind,omitempty"` NoWait bool `json:"no_wait,omitempty"` NodeTypeId string `json:"node_type_id,omitempty"` NumWorkers int `json:"num_workers,omitempty"` @@ -188,6 +190,7 @@ type ResourceCluster struct { SshPublicKeys []string `json:"ssh_public_keys,omitempty"` State string `json:"state,omitempty"` Url string `json:"url,omitempty"` + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` Autoscale *ResourceClusterAutoscale 
`json:"autoscale,omitempty"` AwsAttributes *ResourceClusterAwsAttributes `json:"aws_attributes,omitempty"` AzureAttributes *ResourceClusterAzureAttributes `json:"azure_attributes,omitempty"` diff --git a/bundle/internal/tf/schema/resource_credential.go b/bundle/internal/tf/schema/resource_credential.go new file mode 100644 index 000000000..9d47219ea --- /dev/null +++ b/bundle/internal/tf/schema/resource_credential.go @@ -0,0 +1,52 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceCredentialAwsIamRole struct { + ExternalId string `json:"external_id,omitempty"` + RoleArn string `json:"role_arn,omitempty"` + UnityCatalogIamArn string `json:"unity_catalog_iam_arn,omitempty"` +} + +type ResourceCredentialAzureManagedIdentity struct { + AccessConnectorId string `json:"access_connector_id"` + CredentialId string `json:"credential_id,omitempty"` + ManagedIdentityId string `json:"managed_identity_id,omitempty"` +} + +type ResourceCredentialAzureServicePrincipal struct { + ApplicationId string `json:"application_id"` + ClientSecret string `json:"client_secret"` + DirectoryId string `json:"directory_id"` +} + +type ResourceCredentialDatabricksGcpServiceAccount struct { + CredentialId string `json:"credential_id,omitempty"` + Email string `json:"email,omitempty"` + PrivateKeyId string `json:"private_key_id,omitempty"` +} + +type ResourceCredential struct { + Comment string `json:"comment,omitempty"` + CreatedAt int `json:"created_at,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + CredentialId string `json:"credential_id,omitempty"` + ForceDestroy bool `json:"force_destroy,omitempty"` + ForceUpdate bool `json:"force_update,omitempty"` + FullName string `json:"full_name,omitempty"` + Id string `json:"id,omitempty"` + IsolationMode string `json:"isolation_mode,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name"` + Owner string `json:"owner,omitempty"` + Purpose string `json:"purpose"` + ReadOnly bool `json:"read_only,omitempty"` + SkipValidation bool `json:"skip_validation,omitempty"` + UpdatedAt int `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` + UsedForManagedStorage bool `json:"used_for_managed_storage,omitempty"` + AwsIamRole *ResourceCredentialAwsIamRole `json:"aws_iam_role,omitempty"` + AzureManagedIdentity *ResourceCredentialAzureManagedIdentity `json:"azure_managed_identity,omitempty"` + AzureServicePrincipal *ResourceCredentialAzureServicePrincipal `json:"azure_service_principal,omitempty"` + DatabricksGcpServiceAccount *ResourceCredentialDatabricksGcpServiceAccount `json:"databricks_gcp_service_account,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_grant.go b/bundle/internal/tf/schema/resource_grant.go index d8569f304..6ed97791c 100644 --- a/bundle/internal/tf/schema/resource_grant.go +++ b/bundle/internal/tf/schema/resource_grant.go @@ -4,6 +4,7 @@ package schema type ResourceGrant struct { Catalog string `json:"catalog,omitempty"` + Credential string `json:"credential,omitempty"` ExternalLocation string `json:"external_location,omitempty"` ForeignConnection string `json:"foreign_connection,omitempty"` Function string `json:"function,omitempty"` diff --git a/bundle/internal/tf/schema/resource_grants.go b/bundle/internal/tf/schema/resource_grants.go index dd00152fb..474a9950f 100644 --- a/bundle/internal/tf/schema/resource_grants.go +++ b/bundle/internal/tf/schema/resource_grants.go @@ -9,6 +9,7 @@ type ResourceGrantsGrant struct { type 
ResourceGrants struct { Catalog string `json:"catalog,omitempty"` + Credential string `json:"credential,omitempty"` ExternalLocation string `json:"external_location,omitempty"` ForeignConnection string `json:"foreign_connection,omitempty"` Function string `json:"function,omitempty"` diff --git a/bundle/internal/tf/schema/resource_job.go b/bundle/internal/tf/schema/resource_job.go index c89eafab9..63c8aeb7b 100644 --- a/bundle/internal/tf/schema/resource_job.go +++ b/bundle/internal/tf/schema/resource_job.go @@ -240,6 +240,8 @@ type ResourceJobJobClusterNewCluster struct { EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` IdempotencyToken string `json:"idempotency_token,omitempty"` InstancePoolId string `json:"instance_pool_id,omitempty"` + IsSingleNode bool `json:"is_single_node,omitempty"` + Kind string `json:"kind,omitempty"` NodeTypeId string `json:"node_type_id,omitempty"` NumWorkers int `json:"num_workers,omitempty"` PolicyId string `json:"policy_id,omitempty"` @@ -249,6 +251,7 @@ type ResourceJobJobClusterNewCluster struct { SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` SparkVersion string `json:"spark_version"` SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` Autoscale *ResourceJobJobClusterNewClusterAutoscale `json:"autoscale,omitempty"` AwsAttributes *ResourceJobJobClusterNewClusterAwsAttributes `json:"aws_attributes,omitempty"` AzureAttributes *ResourceJobJobClusterNewClusterAzureAttributes `json:"azure_attributes,omitempty"` @@ -462,6 +465,8 @@ type ResourceJobNewCluster struct { EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` IdempotencyToken string `json:"idempotency_token,omitempty"` InstancePoolId string `json:"instance_pool_id,omitempty"` + IsSingleNode bool `json:"is_single_node,omitempty"` + Kind string `json:"kind,omitempty"` NodeTypeId string `json:"node_type_id,omitempty"` NumWorkers int `json:"num_workers,omitempty"` PolicyId string `json:"policy_id,omitempty"` @@ -471,6 +476,7 @@ type ResourceJobNewCluster struct { SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` SparkVersion string `json:"spark_version"` SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` Autoscale *ResourceJobNewClusterAutoscale `json:"autoscale,omitempty"` AwsAttributes *ResourceJobNewClusterAwsAttributes `json:"aws_attributes,omitempty"` AzureAttributes *ResourceJobNewClusterAzureAttributes `json:"azure_attributes,omitempty"` @@ -548,6 +554,13 @@ type ResourceJobSparkSubmitTask struct { Parameters []string `json:"parameters,omitempty"` } +type ResourceJobTaskCleanRoomsNotebookTask struct { + CleanRoomName string `json:"clean_room_name"` + Etag string `json:"etag,omitempty"` + NotebookBaseParameters map[string]string `json:"notebook_base_parameters,omitempty"` + NotebookName string `json:"notebook_name"` +} + type ResourceJobTaskConditionTask struct { Left string `json:"left"` Op string `json:"op"` @@ -578,6 +591,13 @@ type ResourceJobTaskEmailNotifications struct { OnSuccess []string `json:"on_success,omitempty"` } +type ResourceJobTaskForEachTaskTaskCleanRoomsNotebookTask struct { + CleanRoomName string `json:"clean_room_name"` + Etag string `json:"etag,omitempty"` + NotebookBaseParameters map[string]string `json:"notebook_base_parameters,omitempty"` + NotebookName string `json:"notebook_name"` +} + type ResourceJobTaskForEachTaskTaskConditionTask struct { Left string `json:"left"` 
Op string `json:"op"` @@ -814,6 +834,8 @@ type ResourceJobTaskForEachTaskTaskNewCluster struct { EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` IdempotencyToken string `json:"idempotency_token,omitempty"` InstancePoolId string `json:"instance_pool_id,omitempty"` + IsSingleNode bool `json:"is_single_node,omitempty"` + Kind string `json:"kind,omitempty"` NodeTypeId string `json:"node_type_id,omitempty"` NumWorkers int `json:"num_workers,omitempty"` PolicyId string `json:"policy_id,omitempty"` @@ -823,6 +845,7 @@ type ResourceJobTaskForEachTaskTaskNewCluster struct { SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` SparkVersion string `json:"spark_version"` SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` Autoscale *ResourceJobTaskForEachTaskTaskNewClusterAutoscale `json:"autoscale,omitempty"` AwsAttributes *ResourceJobTaskForEachTaskTaskNewClusterAwsAttributes `json:"aws_attributes,omitempty"` AzureAttributes *ResourceJobTaskForEachTaskTaskNewClusterAzureAttributes `json:"azure_attributes,omitempty"` @@ -963,34 +986,35 @@ type ResourceJobTaskForEachTaskTaskWebhookNotifications struct { } type ResourceJobTaskForEachTaskTask struct { - Description string `json:"description,omitempty"` - DisableAutoOptimization bool `json:"disable_auto_optimization,omitempty"` - EnvironmentKey string `json:"environment_key,omitempty"` - ExistingClusterId string `json:"existing_cluster_id,omitempty"` - JobClusterKey string `json:"job_cluster_key,omitempty"` - MaxRetries int `json:"max_retries,omitempty"` - MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` - RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` - RunIf string `json:"run_if,omitempty"` - TaskKey string `json:"task_key"` - TimeoutSeconds int `json:"timeout_seconds,omitempty"` - ConditionTask *ResourceJobTaskForEachTaskTaskConditionTask `json:"condition_task,omitempty"` - DbtTask *ResourceJobTaskForEachTaskTaskDbtTask `json:"dbt_task,omitempty"` - DependsOn []ResourceJobTaskForEachTaskTaskDependsOn `json:"depends_on,omitempty"` - EmailNotifications *ResourceJobTaskForEachTaskTaskEmailNotifications `json:"email_notifications,omitempty"` - Health *ResourceJobTaskForEachTaskTaskHealth `json:"health,omitempty"` - Library []ResourceJobTaskForEachTaskTaskLibrary `json:"library,omitempty"` - NewCluster *ResourceJobTaskForEachTaskTaskNewCluster `json:"new_cluster,omitempty"` - NotebookTask *ResourceJobTaskForEachTaskTaskNotebookTask `json:"notebook_task,omitempty"` - NotificationSettings *ResourceJobTaskForEachTaskTaskNotificationSettings `json:"notification_settings,omitempty"` - PipelineTask *ResourceJobTaskForEachTaskTaskPipelineTask `json:"pipeline_task,omitempty"` - PythonWheelTask *ResourceJobTaskForEachTaskTaskPythonWheelTask `json:"python_wheel_task,omitempty"` - RunJobTask *ResourceJobTaskForEachTaskTaskRunJobTask `json:"run_job_task,omitempty"` - SparkJarTask *ResourceJobTaskForEachTaskTaskSparkJarTask `json:"spark_jar_task,omitempty"` - SparkPythonTask *ResourceJobTaskForEachTaskTaskSparkPythonTask `json:"spark_python_task,omitempty"` - SparkSubmitTask *ResourceJobTaskForEachTaskTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` - SqlTask *ResourceJobTaskForEachTaskTaskSqlTask `json:"sql_task,omitempty"` - WebhookNotifications *ResourceJobTaskForEachTaskTaskWebhookNotifications `json:"webhook_notifications,omitempty"` + Description string `json:"description,omitempty"` + DisableAutoOptimization bool 
`json:"disable_auto_optimization,omitempty"` + EnvironmentKey string `json:"environment_key,omitempty"` + ExistingClusterId string `json:"existing_cluster_id,omitempty"` + JobClusterKey string `json:"job_cluster_key,omitempty"` + MaxRetries int `json:"max_retries,omitempty"` + MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` + RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` + RunIf string `json:"run_if,omitempty"` + TaskKey string `json:"task_key"` + TimeoutSeconds int `json:"timeout_seconds,omitempty"` + CleanRoomsNotebookTask *ResourceJobTaskForEachTaskTaskCleanRoomsNotebookTask `json:"clean_rooms_notebook_task,omitempty"` + ConditionTask *ResourceJobTaskForEachTaskTaskConditionTask `json:"condition_task,omitempty"` + DbtTask *ResourceJobTaskForEachTaskTaskDbtTask `json:"dbt_task,omitempty"` + DependsOn []ResourceJobTaskForEachTaskTaskDependsOn `json:"depends_on,omitempty"` + EmailNotifications *ResourceJobTaskForEachTaskTaskEmailNotifications `json:"email_notifications,omitempty"` + Health *ResourceJobTaskForEachTaskTaskHealth `json:"health,omitempty"` + Library []ResourceJobTaskForEachTaskTaskLibrary `json:"library,omitempty"` + NewCluster *ResourceJobTaskForEachTaskTaskNewCluster `json:"new_cluster,omitempty"` + NotebookTask *ResourceJobTaskForEachTaskTaskNotebookTask `json:"notebook_task,omitempty"` + NotificationSettings *ResourceJobTaskForEachTaskTaskNotificationSettings `json:"notification_settings,omitempty"` + PipelineTask *ResourceJobTaskForEachTaskTaskPipelineTask `json:"pipeline_task,omitempty"` + PythonWheelTask *ResourceJobTaskForEachTaskTaskPythonWheelTask `json:"python_wheel_task,omitempty"` + RunJobTask *ResourceJobTaskForEachTaskTaskRunJobTask `json:"run_job_task,omitempty"` + SparkJarTask *ResourceJobTaskForEachTaskTaskSparkJarTask `json:"spark_jar_task,omitempty"` + SparkPythonTask *ResourceJobTaskForEachTaskTaskSparkPythonTask `json:"spark_python_task,omitempty"` + SparkSubmitTask *ResourceJobTaskForEachTaskTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` + SqlTask *ResourceJobTaskForEachTaskTaskSqlTask `json:"sql_task,omitempty"` + WebhookNotifications *ResourceJobTaskForEachTaskTaskWebhookNotifications `json:"webhook_notifications,omitempty"` } type ResourceJobTaskForEachTask struct { @@ -1205,6 +1229,8 @@ type ResourceJobTaskNewCluster struct { EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` IdempotencyToken string `json:"idempotency_token,omitempty"` InstancePoolId string `json:"instance_pool_id,omitempty"` + IsSingleNode bool `json:"is_single_node,omitempty"` + Kind string `json:"kind,omitempty"` NodeTypeId string `json:"node_type_id,omitempty"` NumWorkers int `json:"num_workers,omitempty"` PolicyId string `json:"policy_id,omitempty"` @@ -1214,6 +1240,7 @@ type ResourceJobTaskNewCluster struct { SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` SparkVersion string `json:"spark_version"` SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` Autoscale *ResourceJobTaskNewClusterAutoscale `json:"autoscale,omitempty"` AwsAttributes *ResourceJobTaskNewClusterAwsAttributes `json:"aws_attributes,omitempty"` AzureAttributes *ResourceJobTaskNewClusterAzureAttributes `json:"azure_attributes,omitempty"` @@ -1354,35 +1381,36 @@ type ResourceJobTaskWebhookNotifications struct { } type ResourceJobTask struct { - Description string `json:"description,omitempty"` - DisableAutoOptimization bool `json:"disable_auto_optimization,omitempty"` 
- EnvironmentKey string `json:"environment_key,omitempty"` - ExistingClusterId string `json:"existing_cluster_id,omitempty"` - JobClusterKey string `json:"job_cluster_key,omitempty"` - MaxRetries int `json:"max_retries,omitempty"` - MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` - RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` - RunIf string `json:"run_if,omitempty"` - TaskKey string `json:"task_key"` - TimeoutSeconds int `json:"timeout_seconds,omitempty"` - ConditionTask *ResourceJobTaskConditionTask `json:"condition_task,omitempty"` - DbtTask *ResourceJobTaskDbtTask `json:"dbt_task,omitempty"` - DependsOn []ResourceJobTaskDependsOn `json:"depends_on,omitempty"` - EmailNotifications *ResourceJobTaskEmailNotifications `json:"email_notifications,omitempty"` - ForEachTask *ResourceJobTaskForEachTask `json:"for_each_task,omitempty"` - Health *ResourceJobTaskHealth `json:"health,omitempty"` - Library []ResourceJobTaskLibrary `json:"library,omitempty"` - NewCluster *ResourceJobTaskNewCluster `json:"new_cluster,omitempty"` - NotebookTask *ResourceJobTaskNotebookTask `json:"notebook_task,omitempty"` - NotificationSettings *ResourceJobTaskNotificationSettings `json:"notification_settings,omitempty"` - PipelineTask *ResourceJobTaskPipelineTask `json:"pipeline_task,omitempty"` - PythonWheelTask *ResourceJobTaskPythonWheelTask `json:"python_wheel_task,omitempty"` - RunJobTask *ResourceJobTaskRunJobTask `json:"run_job_task,omitempty"` - SparkJarTask *ResourceJobTaskSparkJarTask `json:"spark_jar_task,omitempty"` - SparkPythonTask *ResourceJobTaskSparkPythonTask `json:"spark_python_task,omitempty"` - SparkSubmitTask *ResourceJobTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` - SqlTask *ResourceJobTaskSqlTask `json:"sql_task,omitempty"` - WebhookNotifications *ResourceJobTaskWebhookNotifications `json:"webhook_notifications,omitempty"` + Description string `json:"description,omitempty"` + DisableAutoOptimization bool `json:"disable_auto_optimization,omitempty"` + EnvironmentKey string `json:"environment_key,omitempty"` + ExistingClusterId string `json:"existing_cluster_id,omitempty"` + JobClusterKey string `json:"job_cluster_key,omitempty"` + MaxRetries int `json:"max_retries,omitempty"` + MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` + RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` + RunIf string `json:"run_if,omitempty"` + TaskKey string `json:"task_key"` + TimeoutSeconds int `json:"timeout_seconds,omitempty"` + CleanRoomsNotebookTask *ResourceJobTaskCleanRoomsNotebookTask `json:"clean_rooms_notebook_task,omitempty"` + ConditionTask *ResourceJobTaskConditionTask `json:"condition_task,omitempty"` + DbtTask *ResourceJobTaskDbtTask `json:"dbt_task,omitempty"` + DependsOn []ResourceJobTaskDependsOn `json:"depends_on,omitempty"` + EmailNotifications *ResourceJobTaskEmailNotifications `json:"email_notifications,omitempty"` + ForEachTask *ResourceJobTaskForEachTask `json:"for_each_task,omitempty"` + Health *ResourceJobTaskHealth `json:"health,omitempty"` + Library []ResourceJobTaskLibrary `json:"library,omitempty"` + NewCluster *ResourceJobTaskNewCluster `json:"new_cluster,omitempty"` + NotebookTask *ResourceJobTaskNotebookTask `json:"notebook_task,omitempty"` + NotificationSettings *ResourceJobTaskNotificationSettings `json:"notification_settings,omitempty"` + PipelineTask *ResourceJobTaskPipelineTask `json:"pipeline_task,omitempty"` + PythonWheelTask *ResourceJobTaskPythonWheelTask `json:"python_wheel_task,omitempty"` + RunJobTask 
*ResourceJobTaskRunJobTask `json:"run_job_task,omitempty"` + SparkJarTask *ResourceJobTaskSparkJarTask `json:"spark_jar_task,omitempty"` + SparkPythonTask *ResourceJobTaskSparkPythonTask `json:"spark_python_task,omitempty"` + SparkSubmitTask *ResourceJobTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` + SqlTask *ResourceJobTaskSqlTask `json:"sql_task,omitempty"` + WebhookNotifications *ResourceJobTaskWebhookNotifications `json:"webhook_notifications,omitempty"` } type ResourceJobTriggerFileArrival struct { diff --git a/bundle/internal/tf/schema/resource_permissions.go b/bundle/internal/tf/schema/resource_permissions.go index a3d05e6f2..7dfb84b5f 100644 --- a/bundle/internal/tf/schema/resource_permissions.go +++ b/bundle/internal/tf/schema/resource_permissions.go @@ -10,6 +10,7 @@ type ResourcePermissionsAccessControl struct { } type ResourcePermissions struct { + AppName string `json:"app_name,omitempty"` Authorization string `json:"authorization,omitempty"` ClusterId string `json:"cluster_id,omitempty"` ClusterPolicyId string `json:"cluster_policy_id,omitempty"` diff --git a/bundle/internal/tf/schema/resource_pipeline.go b/bundle/internal/tf/schema/resource_pipeline.go index 7238d24a8..ebdb85027 100644 --- a/bundle/internal/tf/schema/resource_pipeline.go +++ b/bundle/internal/tf/schema/resource_pipeline.go @@ -244,9 +244,9 @@ type ResourcePipelineNotification struct { } type ResourcePipelineRestartWindow struct { - DaysOfWeek string `json:"days_of_week,omitempty"` - StartHour int `json:"start_hour"` - TimeZoneId string `json:"time_zone_id,omitempty"` + DaysOfWeek []string `json:"days_of_week,omitempty"` + StartHour int `json:"start_hour"` + TimeZoneId string `json:"time_zone_id,omitempty"` } type ResourcePipelineTriggerCron struct { diff --git a/bundle/internal/tf/schema/resources.go b/bundle/internal/tf/schema/resources.go index ea5b618fd..b57c2711a 100644 --- a/bundle/internal/tf/schema/resources.go +++ b/bundle/internal/tf/schema/resources.go @@ -5,6 +5,7 @@ package schema type Resources struct { AccessControlRuleSet map[string]any `json:"databricks_access_control_rule_set,omitempty"` Alert map[string]any `json:"databricks_alert,omitempty"` + App map[string]any `json:"databricks_app,omitempty"` ArtifactAllowlist map[string]any `json:"databricks_artifact_allowlist,omitempty"` AutomaticClusterUpdateWorkspaceSetting map[string]any `json:"databricks_automatic_cluster_update_workspace_setting,omitempty"` AwsS3Mount map[string]any `json:"databricks_aws_s3_mount,omitempty"` @@ -18,6 +19,7 @@ type Resources struct { ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"` ComplianceSecurityProfileWorkspaceSetting map[string]any `json:"databricks_compliance_security_profile_workspace_setting,omitempty"` Connection map[string]any `json:"databricks_connection,omitempty"` + Credential map[string]any `json:"databricks_credential,omitempty"` CustomAppIntegration map[string]any `json:"databricks_custom_app_integration,omitempty"` Dashboard map[string]any `json:"databricks_dashboard,omitempty"` DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"` @@ -111,6 +113,7 @@ func NewResources() *Resources { return &Resources{ AccessControlRuleSet: make(map[string]any), Alert: make(map[string]any), + App: make(map[string]any), ArtifactAllowlist: make(map[string]any), AutomaticClusterUpdateWorkspaceSetting: make(map[string]any), AwsS3Mount: make(map[string]any), @@ -124,6 +127,7 @@ func NewResources() *Resources { ClusterPolicy: make(map[string]any), 
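
Side note on the generated-schema change above: `days_of_week` on the pipeline restart window is now a list of weekday names rather than a single string. A minimal standalone sketch (struct copied from the resource_pipeline.go hunk above; not the CLI's own code path) shows what that means for the serialized payload. Weekday values follow the MONDAY..SUNDAY enum referenced elsewhere in this diff.

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors ResourcePipelineRestartWindow after the change above:
// days_of_week is now a slice of weekday names instead of a single string.
type RestartWindow struct {
	DaysOfWeek []string `json:"days_of_week,omitempty"`
	StartHour  int      `json:"start_hour"`
	TimeZoneId string   `json:"time_zone_id,omitempty"`
}

func main() {
	w := RestartWindow{
		DaysOfWeek: []string{"MONDAY", "WEDNESDAY"},
		StartHour:  5,
		TimeZoneId: "UTC",
	}
	out, err := json.Marshal(w)
	if err != nil {
		panic(err)
	}
	// {"days_of_week":["MONDAY","WEDNESDAY"],"start_hour":5,"time_zone_id":"UTC"}
	fmt.Println(string(out))
}
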
ComplianceSecurityProfileWorkspaceSetting: make(map[string]any), Connection: make(map[string]any), + Credential: make(map[string]any), CustomAppIntegration: make(map[string]any), Dashboard: make(map[string]any), DbfsFile: make(map[string]any), diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index 2cadb8090..1f89dc64d 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -21,7 +21,7 @@ type Root struct { const ProviderHost = "registry.terraform.io" const ProviderSource = "databricks/databricks" -const ProviderVersion = "1.59.0" +const ProviderVersion = "1.62.0" func NewRoot() *Root { return &Root{ diff --git a/bundle/libraries/expand_glob_references.go b/bundle/libraries/expand_glob_references.go index c71615e0e..bb1905045 100644 --- a/bundle/libraries/expand_glob_references.go +++ b/bundle/libraries/expand_glob_references.go @@ -11,8 +11,7 @@ import ( "github.com/databricks/cli/libs/dyn" ) -type expand struct { -} +type expand struct{} func matchError(p dyn.Path, l []dyn.Location, message string) diag.Diagnostic { return diag.Diagnostic{ @@ -189,7 +188,6 @@ func (e *expand) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { diags = diags.Extend(d) return dyn.V(output), nil }) - if err != nil { return dyn.InvalidValue, err } @@ -197,7 +195,6 @@ func (e *expand) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { return v, nil }) - if err != nil { diags = diags.Extend(diag.FromErr(err)) } diff --git a/bundle/libraries/filer_volume_test.go b/bundle/libraries/filer_volume_test.go index 0d886824d..7b2f5c5ba 100644 --- a/bundle/libraries/filer_volume_test.go +++ b/bundle/libraries/filer_volume_test.go @@ -110,7 +110,8 @@ func TestFilerForVolumeForErrorFromAPI(t *testing.T) { Summary: "unable to determine if volume at /Volumes/main/my_schema/my_volume exists: error from API", Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}}, Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")}, - }}, diags) + }, + }, diags) } func TestFilerForVolumeWithVolumeNotFound(t *testing.T) { @@ -136,7 +137,8 @@ func TestFilerForVolumeWithVolumeNotFound(t *testing.T) { Summary: "volume /Volumes/main/my_schema/doesnotexist does not exist: some error message", Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}}, Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")}, - }}, diags) + }, + }, diags) } func TestFilerForVolumeNotFoundAndInBundle(t *testing.T) { @@ -173,7 +175,7 @@ func TestFilerForVolumeNotFoundAndInBundle(t *testing.T) { { Severity: diag.Error, Summary: "volume /Volumes/main/my_schema/my_volume does not exist: error from API", - Locations: []dyn.Location{{"config.yml", 1, 2}, {"volume.yml", 1, 2}}, + Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}, {File: "volume.yml", Line: 1, Column: 2}}, Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path"), dyn.MustPathFromString("resources.volumes.foo")}, Detail: `You are using a volume in your artifact_path that is managed by this bundle but which has not been deployed yet. 
Please first deploy diff --git a/bundle/libraries/upload.go b/bundle/libraries/upload.go index 4b6f43701..a2162fb7b 100644 --- a/bundle/libraries/upload.go +++ b/bundle/libraries/upload.go @@ -81,7 +81,6 @@ func collectLocalLibraries(b *bundle.Bundle) (map[string][]configLocation, error return v, nil }) }) - if err != nil { return nil, err } @@ -119,7 +118,6 @@ func collectLocalLibraries(b *bundle.Bundle) (map[string][]configLocation, error return v, nil }) }) - if err != nil { return nil, err } @@ -175,7 +173,6 @@ func (u *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { return v, nil }) - if err != nil { diags = diags.Extend(diag.FromErr(err)) } diff --git a/bundle/permissions/filter.go b/bundle/permissions/filter.go index 60264f6ea..6fa8d1374 100644 --- a/bundle/permissions/filter.go +++ b/bundle/permissions/filter.go @@ -56,7 +56,6 @@ func filter(currentUser string) dyn.WalkValueFunc { } return v, nil - } } diff --git a/bundle/permissions/filter_test.go b/bundle/permissions/filter_test.go index 121ce10dc..e6e5a3799 100644 --- a/bundle/permissions/filter_test.go +++ b/bundle/permissions/filter_test.go @@ -90,7 +90,6 @@ func testFixture(userName string) *bundle.Bundle { }, }, } - } func TestFilterCurrentUser(t *testing.T) { diff --git a/bundle/permissions/mutator.go b/bundle/permissions/mutator.go index bc1392d93..cd7cbf40c 100644 --- a/bundle/permissions/mutator.go +++ b/bundle/permissions/mutator.go @@ -7,43 +7,52 @@ import ( "strings" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" ) -const CAN_MANAGE = "CAN_MANAGE" -const CAN_VIEW = "CAN_VIEW" -const CAN_RUN = "CAN_RUN" +const ( + CAN_MANAGE = "CAN_MANAGE" + CAN_VIEW = "CAN_VIEW" + CAN_RUN = "CAN_RUN" +) -var allowedLevels = []string{CAN_MANAGE, CAN_VIEW, CAN_RUN} -var levelsMap = map[string](map[string]string){ - "jobs": { - CAN_MANAGE: "CAN_MANAGE", - CAN_VIEW: "CAN_VIEW", - CAN_RUN: "CAN_MANAGE_RUN", - }, - "pipelines": { - CAN_MANAGE: "CAN_MANAGE", - CAN_VIEW: "CAN_VIEW", - CAN_RUN: "CAN_RUN", - }, - "mlflow_experiments": { - CAN_MANAGE: "CAN_MANAGE", - CAN_VIEW: "CAN_READ", - }, - "mlflow_models": { - CAN_MANAGE: "CAN_MANAGE", - CAN_VIEW: "CAN_READ", - }, - "model_serving_endpoints": { - CAN_MANAGE: "CAN_MANAGE", - CAN_VIEW: "CAN_VIEW", - CAN_RUN: "CAN_QUERY", - }, - "dashboards": { - CAN_MANAGE: "CAN_MANAGE", - CAN_VIEW: "CAN_READ", - }, -} +var unsupportedResources = []string{"clusters", "volumes", "schemas", "quality_monitors", "registered_models"} + +var ( + allowedLevels = []string{CAN_MANAGE, CAN_VIEW, CAN_RUN} + levelsMap = map[string](map[string]string){ + "jobs": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_VIEW", + CAN_RUN: "CAN_MANAGE_RUN", + }, + "pipelines": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_VIEW", + CAN_RUN: "CAN_RUN", + }, + "experiments": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_READ", + }, + "models": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_READ", + }, + "model_serving_endpoints": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_VIEW", + CAN_RUN: "CAN_QUERY", + }, + "dashboards": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_READ", + }, + } +) type bundlePermissions struct{} @@ -57,11 +66,55 @@ func (m *bundlePermissions) Apply(ctx context.Context, b *bundle.Bundle) diag.Di return diag.FromErr(err) } - applyForJobs(ctx, b) - applyForPipelines(ctx, b) - applyForMlModels(ctx, b) - 
applyForMlExperiments(ctx, b) - applyForModelServiceEndpoints(ctx, b) + patterns := make(map[string]dyn.Pattern, 0) + for key := range levelsMap { + patterns[key] = dyn.NewPattern( + dyn.Key("resources"), + dyn.Key(key), + dyn.AnyKey(), + ) + } + + err = b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + for key, pattern := range patterns { + v, err = dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + var permissions []resources.Permission + pv, err := dyn.Get(v, "permissions") + // If the permissions field is not found, we set to an empty array + if err != nil { + pv = dyn.V([]dyn.Value{}) + } + + err = convert.ToTyped(&permissions, pv) + if err != nil { + return dyn.InvalidValue, fmt.Errorf("failed to convert permissions: %w", err) + } + + permissions = append(permissions, convertPermissions( + ctx, + b.Config.Permissions, + permissions, + key, + levelsMap[key], + )...) + + pv, err = convert.FromTyped(permissions, dyn.NilValue) + if err != nil { + return dyn.InvalidValue, fmt.Errorf("failed to convert permissions: %w", err) + } + + return dyn.Set(v, "permissions", pv) + }) + if err != nil { + return dyn.InvalidValue, err + } + } + + return v, nil + }) + if err != nil { + return diag.FromErr(err) + } return nil } @@ -76,66 +129,6 @@ func validate(b *bundle.Bundle) error { return nil } -func applyForJobs(ctx context.Context, b *bundle.Bundle) { - for key, job := range b.Config.Resources.Jobs { - job.Permissions = append(job.Permissions, convert( - ctx, - b.Config.Permissions, - job.Permissions, - key, - levelsMap["jobs"], - )...) - } -} - -func applyForPipelines(ctx context.Context, b *bundle.Bundle) { - for key, pipeline := range b.Config.Resources.Pipelines { - pipeline.Permissions = append(pipeline.Permissions, convert( - ctx, - b.Config.Permissions, - pipeline.Permissions, - key, - levelsMap["pipelines"], - )...) - } -} - -func applyForMlExperiments(ctx context.Context, b *bundle.Bundle) { - for key, experiment := range b.Config.Resources.Experiments { - experiment.Permissions = append(experiment.Permissions, convert( - ctx, - b.Config.Permissions, - experiment.Permissions, - key, - levelsMap["mlflow_experiments"], - )...) - } -} - -func applyForMlModels(ctx context.Context, b *bundle.Bundle) { - for key, model := range b.Config.Resources.Models { - model.Permissions = append(model.Permissions, convert( - ctx, - b.Config.Permissions, - model.Permissions, - key, - levelsMap["mlflow_models"], - )...) - } -} - -func applyForModelServiceEndpoints(ctx context.Context, b *bundle.Bundle) { - for key, model := range b.Config.Resources.ModelServingEndpoints { - model.Permissions = append(model.Permissions, convert( - ctx, - b.Config.Permissions, - model.Permissions, - key, - levelsMap["model_serving_endpoints"], - )...) 
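
The refactor above replaces the per-resource apply functions with a single walk over `resources.<type>.*` driven by `levelsMap`. The key idea is the level translation: a bundle-level permission such as CAN_RUN becomes the resource-specific API level (CAN_MANAGE_RUN for jobs, CAN_QUERY for serving endpoints). The following standalone sketch illustrates that translation only; the names (`Permission`, `translate`) are illustrative, and the exact behavior of `convertPermissions` is not shown in this diff.

package main

import "fmt"

// Permission is an illustrative stand-in for the bundle permission shape
// (level plus a principal such as user_name).
type Permission struct {
	Level    string
	UserName string
}

// A trimmed copy of the levelsMap idea above: each resource type maps the
// generic bundle-level permission onto its own permission level.
var levels = map[string]map[string]string{
	"jobs":                    {"CAN_MANAGE": "CAN_MANAGE", "CAN_VIEW": "CAN_VIEW", "CAN_RUN": "CAN_MANAGE_RUN"},
	"model_serving_endpoints": {"CAN_MANAGE": "CAN_MANAGE", "CAN_VIEW": "CAN_VIEW", "CAN_RUN": "CAN_QUERY"},
}

// translate applies the per-resource mapping, skipping levels the resource
// type has no entry for. This is a sketch of the mapping step only, not a
// reimplementation of the mutator.
func translate(bundlePerms []Permission, resourceType string) []Permission {
	out := make([]Permission, 0, len(bundlePerms))
	for _, p := range bundlePerms {
		if mapped, ok := levels[resourceType][p.Level]; ok {
			out = append(out, Permission{Level: mapped, UserName: p.UserName})
		}
	}
	return out
}

func main() {
	bundlePerms := []Permission{{Level: "CAN_RUN", UserName: "someone@example.com"}}
	fmt.Println(translate(bundlePerms, "jobs"))                    // [{CAN_MANAGE_RUN someone@example.com}]
	fmt.Println(translate(bundlePerms, "model_serving_endpoints")) // [{CAN_QUERY someone@example.com}]
}
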
- } -} - func (m *bundlePermissions) Name() string { return "ApplyBundlePermissions" } diff --git a/bundle/permissions/mutator_test.go b/bundle/permissions/mutator_test.go index 1a177d902..78703e90f 100644 --- a/bundle/permissions/mutator_test.go +++ b/bundle/permissions/mutator_test.go @@ -2,12 +2,15 @@ package permissions import ( "context" + "fmt" + "slices" "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -51,6 +54,10 @@ func TestApplyBundlePermissions(t *testing.T) { "endpoint_1": {}, "endpoint_2": {}, }, + Dashboards: map[string]*resources.Dashboard{ + "dashboard_1": {}, + "dashboard_2": {}, + }, }, }, } @@ -103,6 +110,10 @@ func TestApplyBundlePermissions(t *testing.T) { require.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint_2"].Permissions, resources.Permission{Level: "CAN_MANAGE", UserName: "TestUser"}) require.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint_2"].Permissions, resources.Permission{Level: "CAN_VIEW", GroupName: "TestGroup"}) require.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint_2"].Permissions, resources.Permission{Level: "CAN_QUERY", ServicePrincipalName: "TestServicePrincipal"}) + + require.Len(t, b.Config.Resources.Dashboards["dashboard_1"].Permissions, 2) + require.Contains(t, b.Config.Resources.Dashboards["dashboard_1"].Permissions, resources.Permission{Level: "CAN_MANAGE", UserName: "TestUser"}) + require.Contains(t, b.Config.Resources.Dashboards["dashboard_1"].Permissions, resources.Permission{Level: "CAN_READ", GroupName: "TestGroup"}) } func TestWarningOnOverlapPermission(t *testing.T) { @@ -146,5 +157,20 @@ func TestWarningOnOverlapPermission(t *testing.T) { require.Contains(t, b.Config.Resources.Jobs["job_2"].Permissions, resources.Permission{Level: "CAN_VIEW", UserName: "TestUser2"}) require.Contains(t, b.Config.Resources.Jobs["job_2"].Permissions, resources.Permission{Level: "CAN_MANAGE", UserName: "TestUser"}) require.Contains(t, b.Config.Resources.Jobs["job_2"].Permissions, resources.Permission{Level: "CAN_VIEW", GroupName: "TestGroup"}) - +} + +func TestAllResourcesExplicitlyDefinedForPermissionsSupport(t *testing.T) { + r := config.Resources{} + + for _, resource := range unsupportedResources { + _, ok := levelsMap[resource] + assert.False(t, ok, fmt.Sprintf("Resource %s is defined in both levelsMap and unsupportedResources", resource)) + } + + for _, resource := range r.AllResources() { + _, ok := levelsMap[resource.Description.PluralName] + if !slices.Contains(unsupportedResources, resource.Description.PluralName) && !ok { + assert.Fail(t, fmt.Sprintf("Resource %s is not explicitly defined in levelsMap or unsupportedResources", resource.Description.PluralName)) + } + } } diff --git a/bundle/permissions/utils.go b/bundle/permissions/utils.go index 9072cd252..cf16ea9b2 100644 --- a/bundle/permissions/utils.go +++ b/bundle/permissions/utils.go @@ -7,7 +7,7 @@ import ( "github.com/databricks/cli/libs/diag" ) -func convert( +func convertPermissions( ctx context.Context, bundlePermissions []resources.Permission, resourcePermissions []resources.Permission, diff --git a/bundle/permissions/validate.go b/bundle/permissions/validate.go index f1a18f430..dee7326cf 100644 --- a/bundle/permissions/validate.go +++ b/bundle/permissions/validate.go @@ -9,8 +9,7 @@ import ( 
"github.com/databricks/cli/libs/diag" ) -type validateSharedRootPermissions struct { -} +type validateSharedRootPermissions struct{} func ValidateSharedRootPermissions() bundle.Mutator { return &validateSharedRootPermissions{} diff --git a/bundle/permissions/workspace_path_permissions.go b/bundle/permissions/workspace_path_permissions.go index a3b4424c1..225d2499e 100644 --- a/bundle/permissions/workspace_path_permissions.go +++ b/bundle/permissions/workspace_path_permissions.go @@ -52,7 +52,7 @@ func (p WorkspacePathPermissions) Compare(perms []resources.Permission) diag.Dia } // containsAll checks if permA contains all permissions in permB. -func containsAll(permA []resources.Permission, permB []resources.Permission) (bool, []resources.Permission) { +func containsAll(permA, permB []resources.Permission) (bool, []resources.Permission) { missing := make([]resources.Permission, 0) for _, a := range permA { found := false diff --git a/bundle/permissions/workspace_path_permissions_test.go b/bundle/permissions/workspace_path_permissions_test.go index 0bb00474c..eaefad906 100644 --- a/bundle/permissions/workspace_path_permissions_test.go +++ b/bundle/permissions/workspace_path_permissions_test.go @@ -117,5 +117,4 @@ func TestWorkspacePathPermissionsCompare(t *testing.T) { diags := wp.Compare(tc.perms) require.Equal(t, tc.expected, diags) } - } diff --git a/bundle/permissions/workspace_root.go b/bundle/permissions/workspace_root.go index de4f3a7fe..4ac0d38a5 100644 --- a/bundle/permissions/workspace_root.go +++ b/bundle/permissions/workspace_root.go @@ -12,8 +12,7 @@ import ( "golang.org/x/sync/errgroup" ) -type workspaceRootPermissions struct { -} +type workspaceRootPermissions struct{} func ApplyWorkspaceRootPermissions() bundle.Mutator { return &workspaceRootPermissions{} diff --git a/bundle/phases/bind.go b/bundle/phases/bind.go index b2e92d6e2..c62c48aea 100644 --- a/bundle/phases/bind.go +++ b/bundle/phases/bind.go @@ -25,7 +25,7 @@ func Bind(opts *terraform.BindOptions) bundle.Mutator { ) } -func Unbind(resourceType string, resourceKey string) bundle.Mutator { +func Unbind(resourceType, resourceKey string) bundle.Mutator { return newPhase( "unbind", []bundle.Mutator{ diff --git a/bundle/render/render_text_output.go b/bundle/render/render_text_output.go index 92dacb448..bacb85735 100644 --- a/bundle/render/render_text_output.go +++ b/bundle/render/render_text_output.go @@ -110,7 +110,7 @@ func renderSummaryHeaderTemplate(out io.Writer, b *bundle.Bundle) error { return renderSummaryHeaderTemplate(out, &bundle.Bundle{}) } - var currentUser = &iam.User{} + currentUser := &iam.User{} if b.Config.Workspace.CurrentUser != nil { if b.Config.Workspace.CurrentUser.User != nil { @@ -171,10 +171,16 @@ func RenderDiagnostics(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics, if err != nil { return fmt.Errorf("failed to render summary: %w", err) } - io.WriteString(out, "\n") + _, err = io.WriteString(out, "\n") + if err != nil { + return err + } } trailer := buildTrailer(diags) - io.WriteString(out, trailer) + _, err = io.WriteString(out, trailer) + if err != nil { + return err + } } return nil diff --git a/bundle/render/render_text_output_test.go b/bundle/render/render_text_output_test.go index 135d79dae..506756f70 100644 --- a/bundle/render/render_text_output_test.go +++ b/bundle/render/render_text_output_test.go @@ -376,7 +376,8 @@ func TestRenderDiagnostics(t *testing.T) { Locations: []dyn.Location{{ File: "foo.yaml", Line: 1, - Column: 2}}, + Column: 2, + }}, }, }, expected: "Error: failed 
to load xxx\n" + @@ -489,7 +490,8 @@ func TestRenderSummaryTemplate_nilBundle(t *testing.T) { err := renderSummaryHeaderTemplate(writer, nil) require.NoError(t, err) - io.WriteString(writer, buildTrailer(nil)) + _, err = io.WriteString(writer, buildTrailer(nil)) + require.NoError(t, err) assert.Equal(t, "Validation OK!\n", writer.String()) } diff --git a/bundle/root_test.go b/bundle/root_test.go index 99bf58a00..075242710 100644 --- a/bundle/root_test.go +++ b/bundle/root_test.go @@ -71,7 +71,7 @@ func TestRootLookup(t *testing.T) { defer f.Close() // Create directory tree. - err = os.MkdirAll("./a/b/c", 0755) + err = os.MkdirAll("./a/b/c", 0o755) require.NoError(t, err) // It should find the project root from $PWD. diff --git a/bundle/run/job.go b/bundle/run/job.go index 340af961c..b43db9184 100644 --- a/bundle/run/job.go +++ b/bundle/run/job.go @@ -143,7 +143,7 @@ func logProgressCallback(ctx context.Context, progressLogger *cmdio.Logger) func progressLogger.Log(event) // log progress events in using the default logger - log.Infof(ctx, event.String()) + log.Info(ctx, event.String()) } } @@ -203,7 +203,7 @@ func (r *jobRunner) Run(ctx context.Context, opts *Options) (output.RunOutput, e logDebug(r) logProgress(r) }).GetWithTimeout(jobRunTimeout) - if err != nil && runId != nil { + if err != nil { r.logFailedTasks(ctx, *runId) } if err != nil { @@ -289,7 +289,6 @@ func (r *jobRunner) Cancel(ctx context.Context) error { ActiveOnly: true, JobId: jobID, }) - if err != nil { return err } diff --git a/bundle/run/job_args.go b/bundle/run/job_args.go index 85cf96efb..b1596bbb0 100644 --- a/bundle/run/job_args.go +++ b/bundle/run/job_args.go @@ -131,7 +131,7 @@ func (r *jobRunner) posArgsHandler() argsHandler { } // Handle task parameters otherwise. - var seen = make(map[jobTaskType]bool) + seen := make(map[jobTaskType]bool) for _, t := range job.Tasks { if t.NotebookTask != nil { seen[jobTaskTypeNotebook] = true diff --git a/bundle/run/job_options.go b/bundle/run/job_options.go index c359e79eb..6a03dff95 100644 --- a/bundle/run/job_options.go +++ b/bundle/run/job_options.go @@ -80,7 +80,7 @@ func (o *JobOptions) validatePipelineParams() (*jobs.PipelineParams, error) { return nil, nil } - var defaultErr = fmt.Errorf("job run argument --pipeline-params only supports `full_refresh=`") + defaultErr := fmt.Errorf("job run argument --pipeline-params only supports `full_refresh=`") v, ok := o.pipelineParams["full_refresh"] if !ok { return nil, defaultErr diff --git a/bundle/run/job_test.go b/bundle/run/job_test.go index 369c546aa..5d19ca4ff 100644 --- a/bundle/run/job_test.go +++ b/bundle/run/job_test.go @@ -42,7 +42,8 @@ func TestConvertPythonParams(t *testing.T) { opts := &Options{ Job: JobOptions{}, } - runner.convertPythonParams(opts) + err := runner.convertPythonParams(opts) + require.NoError(t, err) require.NotContains(t, opts.Job.notebookParams, "__python_params") opts = &Options{ @@ -50,7 +51,8 @@ func TestConvertPythonParams(t *testing.T) { pythonParams: []string{"param1", "param2", "param3"}, }, } - runner.convertPythonParams(opts) + err = runner.convertPythonParams(opts) + require.NoError(t, err) require.Contains(t, opts.Job.notebookParams, "__python_params") require.Equal(t, opts.Job.notebookParams["__python_params"], `["param1","param2","param3"]`) } @@ -158,7 +160,7 @@ func TestJobRunnerRestart(t *testing.T) { m := mocks.NewMockWorkspaceClient(t) b.SetWorkpaceClient(m.WorkspaceClient) ctx := context.Background() - ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, 
&bytes.Buffer{}, &bytes.Buffer{}, "", "")) + ctx = cmdio.InContext(ctx, cmdio.NewIO(ctx, flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "")) ctx = cmdio.NewContext(ctx, cmdio.NewLogger(flags.ModeAppend)) jobApi := m.GetMockJobsAPI() @@ -229,7 +231,7 @@ func TestJobRunnerRestartForContinuousUnpausedJobs(t *testing.T) { m := mocks.NewMockWorkspaceClient(t) b.SetWorkpaceClient(m.WorkspaceClient) ctx := context.Background() - ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) + ctx = cmdio.InContext(ctx, cmdio.NewIO(ctx, flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) ctx = cmdio.NewContext(ctx, cmdio.NewLogger(flags.ModeAppend)) jobApi := m.GetMockJobsAPI() diff --git a/bundle/run/output/task.go b/bundle/run/output/task.go index 402e4d66a..1ef78a8c3 100644 --- a/bundle/run/output/task.go +++ b/bundle/run/output/task.go @@ -7,13 +7,15 @@ import ( "github.com/databricks/databricks-sdk-go/service/jobs" ) -type NotebookOutput jobs.NotebookOutput -type DbtOutput jobs.DbtOutput -type SqlOutput jobs.SqlOutput -type LogsOutput struct { - Logs string `json:"logs"` - LogsTruncated bool `json:"logs_truncated"` -} +type ( + NotebookOutput jobs.NotebookOutput + DbtOutput jobs.DbtOutput + SqlOutput jobs.SqlOutput + LogsOutput struct { + Logs string `json:"logs"` + LogsTruncated bool `json:"logs_truncated"` + } +) func structToString(val any) (string, error) { b, err := json.MarshalIndent(val, "", " ") diff --git a/bundle/run/pipeline.go b/bundle/run/pipeline.go index ffe012843..a0e7d1e1e 100644 --- a/bundle/run/pipeline.go +++ b/bundle/run/pipeline.go @@ -37,11 +37,11 @@ func (r *pipelineRunner) logEvent(ctx context.Context, event pipelines.PipelineE } } if logString != "" { - log.Errorf(ctx, fmt.Sprintf("[%s] %s", event.EventType, logString)) + log.Errorf(ctx, "[%s] %s", event.EventType, logString) } } -func (r *pipelineRunner) logErrorEvent(ctx context.Context, pipelineId string, updateId string) error { +func (r *pipelineRunner) logErrorEvent(ctx context.Context, pipelineId, updateId string) error { w := r.bundle.WorkspaceClient() // Note: For a 100 percent correct and complete solution we should use the @@ -85,7 +85,7 @@ func (r *pipelineRunner) Name() string { } func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (output.RunOutput, error) { - var pipelineID = r.pipeline.ID + pipelineID := r.pipeline.ID // Include resource key in logger. 
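
The logging changes in this diff (log.Errorf no longer wrapping fmt.Sprintf, log.Infof replaced by log.Info for pre-built messages) all address the same pitfall: passing an already-rendered string as a printf-style format string. A small standard-library-only sketch shows why, assuming nothing beyond fmt.

package main

import "fmt"

func main() {
	// A pre-rendered message that happens to contain a percent sign, as a
	// pipeline event string easily could.
	msg := "update failed: quota at 100% for cluster"

	// Passing it as a format string misreads the '%' as the start of a verb
	// and mangles the output; printf-style linters also flag this pattern.
	fmt.Printf(msg)
	fmt.Println()

	// Either give the ...f variant real arguments, or use the plain variant.
	fmt.Printf("[%s] %s\n", "update_progress", msg)
	fmt.Print(msg, "\n")
}
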
ctx = log.NewContext(ctx, log.GetLogger(ctx).With("resource", r.Key())) @@ -132,7 +132,7 @@ func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (output.RunOutp } for _, event := range events { progressLogger.Log(&event) - log.Infof(ctx, event.String()) + log.Info(ctx, event.String()) } update, err := w.Pipelines.GetUpdateByPipelineIdAndUpdateId(ctx, pipelineID, updateID) @@ -173,7 +173,6 @@ func (r *pipelineRunner) Cancel(ctx context.Context) error { wait, err := w.Pipelines.Stop(ctx, pipelines.StopRequest{ PipelineId: r.pipeline.ID, }) - if err != nil { return err } diff --git a/bundle/run/pipeline_test.go b/bundle/run/pipeline_test.go index e4608061c..66f9d86be 100644 --- a/bundle/run/pipeline_test.go +++ b/bundle/run/pipeline_test.go @@ -76,7 +76,7 @@ func TestPipelineRunnerRestart(t *testing.T) { } b.SetWorkpaceClient(m.WorkspaceClient) ctx := context.Background() - ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) + ctx = cmdio.InContext(ctx, cmdio.NewIO(ctx, flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) ctx = cmdio.NewContext(ctx, cmdio.NewLogger(flags.ModeAppend)) mockWait := &pipelines.WaitGetPipelineIdle[struct{}]{ diff --git a/bundle/run/progress/pipeline.go b/bundle/run/progress/pipeline.go index 4a256e76c..b82dd7abd 100644 --- a/bundle/run/progress/pipeline.go +++ b/bundle/run/progress/pipeline.go @@ -51,7 +51,7 @@ type UpdateTracker struct { w *databricks.WorkspaceClient } -func NewUpdateTracker(pipelineId string, updateId string, w *databricks.WorkspaceClient) *UpdateTracker { +func NewUpdateTracker(pipelineId, updateId string, w *databricks.WorkspaceClient) *UpdateTracker { return &UpdateTracker{ w: w, PipelineId: pipelineId, diff --git a/bundle/schema/embed_test.go b/bundle/schema/embed_test.go index e4b45baa5..59f1458cb 100644 --- a/bundle/schema/embed_test.go +++ b/bundle/schema/embed_test.go @@ -41,33 +41,23 @@ func TestJsonSchema(t *testing.T) { resourceJob := walk(s.Definitions, "github.com", "databricks", "cli", "bundle", "config", "resources.Job") fields := []string{"name", "continuous", "tasks", "trigger"} for _, field := range fields { - assert.NotEmpty(t, resourceJob.AnyOf[0].Properties[field].Description) + assert.NotEmpty(t, resourceJob.OneOf[0].Properties[field].Description) } // Assert descriptions were also loaded for a job task definition. 
jobTask := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "jobs.Task") fields = []string{"notebook_task", "spark_jar_task", "spark_python_task", "spark_submit_task", "description", "depends_on", "environment_key", "for_each_task", "existing_cluster_id"} for _, field := range fields { - assert.NotEmpty(t, jobTask.AnyOf[0].Properties[field].Description) + assert.NotEmpty(t, jobTask.OneOf[0].Properties[field].Description) } // Assert descriptions are loaded for pipelines pipeline := walk(s.Definitions, "github.com", "databricks", "cli", "bundle", "config", "resources.Pipeline") fields = []string{"name", "catalog", "clusters", "channel", "continuous", "development"} for _, field := range fields { - assert.NotEmpty(t, pipeline.AnyOf[0].Properties[field].Description) + assert.NotEmpty(t, pipeline.OneOf[0].Properties[field].Description) } - // Assert enum values are loaded - schedule := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "pipelines.RestartWindow") - assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "MONDAY") - assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "TUESDAY") - assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "WEDNESDAY") - assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "THURSDAY") - assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "FRIDAY") - assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "SATURDAY") - assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "SUNDAY") - providers := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "jobs.GitProvider") assert.Contains(t, providers.Enum, "gitHub") assert.Contains(t, providers.Enum, "bitbucketCloud") diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index f791b8440..9a352ebb2 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -1,7 +1,7 @@ { "$defs": { "bool": { - "anyOf": [ + "oneOf": [ { "type": "boolean" }, @@ -28,7 +28,7 @@ ] }, "float64": { - "anyOf": [ + "oneOf": [ { "type": "number" }, @@ -60,7 +60,7 @@ "bundle": { "config": { "resources.Cluster": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -130,6 +130,13 @@ "description": "The optional ID of the instance pool to which the cluster belongs.", "$ref": "#/$defs/string" }, + "is_single_node": { + "description": "This field can only be used with `kind`.\n\nWhen set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`\n", + "$ref": "#/$defs/bool" + }, + "kind": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.Kind" + }, "node_type_id": { "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.\n", "$ref": "#/$defs/string" @@ -168,6 +175,10 @@ "description": "SSH public key contents that will be added to each Spark node in this cluster. 
The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.", "$ref": "#/$defs/slice/string" }, + "use_ml_runtime": { + "description": "This field can only be used with `kind`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.\n", + "$ref": "#/$defs/bool" + }, "workload_type": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.WorkloadType" } @@ -181,7 +192,7 @@ ] }, "resources.Dashboard": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -244,14 +255,16 @@ ] }, "resources.Grant": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "principal": { + "description": "The name of the principal that will be granted privileges", "$ref": "#/$defs/string" }, "privileges": { + "description": "The privileges to grant to the specified entity", "$ref": "#/$defs/slice/string" } }, @@ -268,7 +281,7 @@ ] }, "resources.Job": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -363,7 +376,7 @@ ] }, "resources.MlflowExperiment": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -408,7 +421,7 @@ ] }, "resources.MlflowModel": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -453,7 +466,7 @@ ] }, "resources.ModelServingEndpoint": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -498,20 +511,24 @@ ] }, "resources.Permission": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "group_name": { + "description": "The name of the group that has the permission set in level.", "$ref": "#/$defs/string" }, "level": { + "description": "The allowed permission for user, group, service principal defined for this permission.", "$ref": "#/$defs/string" }, "service_principal_name": { + "description": "The name of the service principal that has the permission set in level.", "$ref": "#/$defs/string" }, "user_name": { + "description": "The name of the user that has the permission set in level.", "$ref": "#/$defs/string" } }, @@ -527,7 +544,7 @@ ] }, "resources.Pipeline": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -636,7 +653,7 @@ ] }, "resources.QualityMonitor": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -710,7 +727,7 @@ ] }, "resources.RegisteredModel": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -752,7 +769,7 @@ ] }, "resources.Schema": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -792,7 +809,7 @@ ] }, "resources.Volume": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -837,7 +854,7 @@ ] }, "variable.Lookup": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -895,12 +912,15 @@ "$ref": "#/$defs/interface" }, "description": { + "description": "The description of the variable.", "$ref": "#/$defs/string" }, "lookup": { + "description": "The name of the alert, cluster_policy, cluster, dashboard, instance_pool, job, metastore, pipeline, query, service_principal, or warehouse object for which to retrieve an ID.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config/variable.Lookup" }, "type": { + "description": "The type of the variable.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config/variable.VariableType" } }, @@ -916,12 +936,16 @@ "$ref": "#/$defs/interface" }, "description": { + "description": "The description of the variable", "$ref": "#/$defs/string" }, "lookup": { - "$ref": 
"#/$defs/github.com/databricks/cli/bundle/config/variable.Lookup" + "description": "The name of the alert, cluster_policy, cluster, dashboard, instance_pool, job, metastore, pipeline, query, service_principal, or warehouse object for which to retrieve an ID.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config/variable.Lookup", + "markdownDescription": "The name of the `alert`, `cluster_policy`, `cluster`, `dashboard`, `instance_pool`, `job`, `metastore`, `pipeline`, `query`, `service_principal`, or `warehouse` object for which to retrieve an ID.\"" }, "type": { + "description": "The type of the variable.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config/variable.VariableType" } }, @@ -932,24 +956,31 @@ } }, "config.Artifact": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "build": { + "description": "An optional set of non-default build commands that you want to run locally before deployment.\n\nFor Python wheel builds, the Databricks CLI assumes that it can find a local install of the Python wheel package to run builds, and it runs the command python setup.py bdist_wheel by default during each bundle deployment.\n\nTo specify multiple build commands, separate each command with double-ampersand (\u0026\u0026) characters.", "$ref": "#/$defs/string" }, "executable": { + "description": "The executable type.", "$ref": "#/$defs/github.com/databricks/cli/libs/exec.ExecutableType" }, "files": { - "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config.ArtifactFile" + "description": "The source files for the artifact.", + "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config.ArtifactFile", + "markdownDescription": "The source files for the artifact, defined as an [artifact_file](https://docs.databricks.com/dev-tools/bundles/reference.html#artifact_file)." }, "path": { + "description": "The location where the built artifact will be saved.", "$ref": "#/$defs/string" }, "type": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.ArtifactType" + "description": "The type of the artifact.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.ArtifactType", + "markdownDescription": "The type of the artifact. Valid values are `wheel` or `jar`" } }, "additionalProperties": false, @@ -964,11 +995,12 @@ ] }, "config.ArtifactFile": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "source": { + "description": "The path of the files used to build the artifact.", "$ref": "#/$defs/string" } }, @@ -987,26 +1019,35 @@ "type": "string" }, "config.Bundle": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "cluster_id": { - "$ref": "#/$defs/string" + "description": "The ID of a cluster to use to run the bundle.", + "$ref": "#/$defs/string", + "markdownDescription": "The ID of a cluster to use to run the bundle. See [cluster_id](https://docs.databricks.com/dev-tools/bundles/settings.html#cluster_id)." }, "compute_id": { "$ref": "#/$defs/string" }, "databricks_cli_version": { - "$ref": "#/$defs/string" + "description": "The Databricks CLI version to use for the bundle.", + "$ref": "#/$defs/string", + "markdownDescription": "The Databricks CLI version to use for the bundle. See [databricks_cli_version](https://docs.databricks.com/dev-tools/bundles/settings.html#databricks_cli_version)." 
}, "deployment": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Deployment" + "description": "The definition of the bundle deployment", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Deployment", + "markdownDescription": "The definition of the bundle deployment. For supported attributes, see [deployment](https://docs.databricks.com/dev-tools/bundles/reference.html#deployment) and [link](https://docs.databricks.com/dev-tools/bundles/deployment-modes.html)." }, "git": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Git" + "description": "The Git version control details that are associated with your bundle.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Git", + "markdownDescription": "The Git version control details that are associated with your bundle. For supported attributes, see [git](https://docs.databricks.com/dev-tools/bundles/reference.html#git) and [git](https://docs.databricks.com/dev-tools/bundles/settings.html#git)." }, "name": { + "description": "The name of the bundle.", "$ref": "#/$defs/string" }, "uuid": { @@ -1028,15 +1069,18 @@ "type": "string" }, "config.Deployment": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "fail_on_active_runs": { + "description": "Whether to fail on active runs. If this is set to true a deployment that is running can be interrupted.", "$ref": "#/$defs/bool" }, "lock": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Lock" + "description": "The deployment lock attributes.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Lock", + "markdownDescription": "The deployment lock attributes. See [lock](https://docs.databricks.com/dev-tools/bundles/reference.html#lock)." } }, "additionalProperties": false @@ -1048,20 +1092,24 @@ ] }, "config.Experimental": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "pydabs": { + "description": "The PyDABs configuration.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config.PyDABs" }, "python_wheel_wrapper": { + "description": "Whether to use a Python wheel wrapper", "$ref": "#/$defs/bool" }, "scripts": { + "description": "The commands to run", "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config.Command" }, "use_legacy_run_as": { + "description": "Whether to use the legacy run_as behavior", "$ref": "#/$defs/bool" } }, @@ -1074,15 +1122,19 @@ ] }, "config.Git": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "branch": { - "$ref": "#/$defs/string" + "description": "The Git branch name.", + "$ref": "#/$defs/string", + "markdownDescription": "The Git branch name. See [git](https://docs.databricks.com/dev-tools/bundles/settings.html#git)." }, "origin_url": { - "$ref": "#/$defs/string" + "description": "The origin URL of the repository.", + "$ref": "#/$defs/string", + "markdownDescription": "The origin URL of the repository. See [git](https://docs.databricks.com/dev-tools/bundles/settings.html#git)." 
} }, "additionalProperties": false @@ -1094,14 +1146,16 @@ ] }, "config.Lock": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "enabled": { + "description": "Whether this lock is enabled.", "$ref": "#/$defs/bool" }, "force": { + "description": "Whether to force this lock if it is enabled.", "$ref": "#/$defs/bool" } }, @@ -1117,26 +1171,32 @@ "type": "string" }, "config.Presets": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "jobs_max_concurrent_runs": { + "description": "The maximum concurrent runs for a job.", "$ref": "#/$defs/int" }, "name_prefix": { + "description": "The prefix for job runs of the bundle.", "$ref": "#/$defs/string" }, "pipelines_development": { + "description": "Whether pipeline deployments should be locked in development mode.", "$ref": "#/$defs/bool" }, "source_linked_deployment": { + "description": "Whether to link the deployment to the bundle source.", "$ref": "#/$defs/bool" }, "tags": { + "description": "The tags for the bundle deployment.", "$ref": "#/$defs/map/string" }, "trigger_pause_status": { + "description": "A pause status to apply to all job triggers and schedules. Valid values are PAUSED or UNPAUSED.", "$ref": "#/$defs/string" } }, @@ -1149,17 +1209,20 @@ ] }, "config.PyDABs": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "enabled": { + "description": "Whether or not PyDABs (Private Preview) is enabled", "$ref": "#/$defs/bool" }, "import": { + "description": "The PyDABs project to import to discover resources, resource generator and mutators", "$ref": "#/$defs/slice/string" }, "venv_path": { + "description": "The Python virtual environment path", "$ref": "#/$defs/string" } }, @@ -1172,39 +1235,59 @@ ] }, "config.Resources": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "clusters": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Cluster" + "description": "The cluster definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Cluster", + "markdownDescription": "The cluster definitions for the bundle. See [cluster](https://docs.databricks.com/dev-tools/bundles/resources.html#cluster)" }, "dashboards": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Dashboard" + "description": "The dashboard definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Dashboard", + "markdownDescription": "The dashboard definitions for the bundle. See [dashboard](https://docs.databricks.com/dev-tools/bundles/resources.html#dashboard)" }, "experiments": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.MlflowExperiment" + "description": "The experiment definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.MlflowExperiment", + "markdownDescription": "The experiment definitions for the bundle. See [experiment](https://docs.databricks.com/dev-tools/bundles/resources.html#experiment)" }, "jobs": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Job" + "description": "The job definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Job", + "markdownDescription": "The job definitions for the bundle. 
See [job](https://docs.databricks.com/dev-tools/bundles/resources.html#job)" }, "model_serving_endpoints": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.ModelServingEndpoint" + "description": "The model serving endpoint definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.ModelServingEndpoint", + "markdownDescription": "The model serving endpoint definitions for the bundle. See [model_serving_endpoint](https://docs.databricks.com/dev-tools/bundles/resources.html#model_serving_endpoint)" }, "models": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.MlflowModel" + "description": "The model definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.MlflowModel", + "markdownDescription": "The model definitions for the bundle. See [model](https://docs.databricks.com/dev-tools/bundles/resources.html#model)" }, "pipelines": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Pipeline" + "description": "The pipeline definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Pipeline", + "markdownDescription": "The pipeline definitions for the bundle. See [pipeline](https://docs.databricks.com/dev-tools/bundles/resources.html#pipeline)" }, "quality_monitors": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.QualityMonitor" + "description": "The quality monitor definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.QualityMonitor", + "markdownDescription": "The quality monitor definitions for the bundle. See [quality_monitor](https://docs.databricks.com/dev-tools/bundles/resources.html#quality_monitor)" }, "registered_models": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.RegisteredModel" + "description": "The registered model definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.RegisteredModel", + "markdownDescription": "The registered model definitions for the bundle. See [registered_model](https://docs.databricks.com/dev-tools/bundles/resources.html#registered_model)" }, "schemas": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Schema" + "description": "The schema definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Schema", + "markdownDescription": "The schema definitions for the bundle. 
See [schema](https://docs.databricks.com/dev-tools/bundles/resources.html#schema)" }, "volumes": { "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Volume" @@ -1219,17 +1302,20 @@ ] }, "config.Sync": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "exclude": { + "description": "A list of files or folders to exclude from the bundle.", "$ref": "#/$defs/slice/string" }, "include": { + "description": "A list of files or folders to include in the bundle.", "$ref": "#/$defs/slice/string" }, "paths": { + "description": "The local folder paths, which can be outside the bundle root, to synchronize to the workspace when the bundle is deployed.", "$ref": "#/$defs/slice/string" } }, @@ -1242,51 +1328,75 @@ ] }, "config.Target": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "artifacts": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config.Artifact" + "description": "The artifacts to include in the target deployment.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config.Artifact", + "markdownDescription": "The artifacts to include in the target deployment. See [artifact](https://docs.databricks.com/dev-tools/bundles/reference.html#artifact)" }, "bundle": { + "description": "The name of the bundle when deploying to this target.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Bundle" }, "cluster_id": { + "description": "The ID of the cluster to use for this target.", "$ref": "#/$defs/string" }, "compute_id": { + "description": "Deprecated. The ID of the compute to use for this target.", "$ref": "#/$defs/string" }, "default": { + "description": "Whether this target is the default target.", "$ref": "#/$defs/bool" }, "git": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Git" + "description": "The Git version control settings for the target.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Git", + "markdownDescription": "The Git version control settings for the target. See [git](https://docs.databricks.com/dev-tools/bundles/reference.html#git)." }, "mode": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Mode" + "description": "The deployment mode for the target.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Mode", + "markdownDescription": "The deployment mode for the target. Valid values are `development` or `production`. See [link](https://docs.databricks.com/dev-tools/bundles/deployment-modes.html)." }, "permissions": { - "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission" + "description": "The permissions for deploying and running the bundle in the target.", + "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission", + "markdownDescription": "The permissions for deploying and running the bundle in the target. See [permission](https://docs.databricks.com/dev-tools/bundles/reference.html#permission)." }, "presets": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Presets" + "description": "The deployment presets for the target.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Presets", + "markdownDescription": "The deployment presets for the target. See [preset](https://docs.databricks.com/dev-tools/bundles/reference.html#preset)." 
}, "resources": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Resources" + "description": "The resource definitions for the target.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Resources", + "markdownDescription": "The resource definitions for the target. See [resources](https://docs.databricks.com/dev-tools/bundles/reference.html#resources)." }, "run_as": { - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.JobRunAs" + "description": "The identity to use to run the bundle.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.JobRunAs", + "markdownDescription": "The identity to use to run the bundle. See [job_run_as](https://docs.databricks.com/dev-tools/bundles/reference.html#job_run_as) and [link](https://docs.databricks.com/dev-tools/bundles/run_as.html)." }, "sync": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Sync" + "description": "The local paths to sync to the target workspace when a bundle is run or deployed.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Sync", + "markdownDescription": "The local paths to sync to the target workspace when a bundle is run or deployed. See [sync](https://docs.databricks.com/dev-tools/bundles/reference.html#sync)." }, "variables": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/variable.TargetVariable" + "description": "The custom variable definitions for the target.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/variable.TargetVariable", + "markdownDescription": "The custom variable definitions for the target. See [variables](https://docs.databricks.com/dev-tools/bundles/settings.html#variables) and [link](https://docs.databricks.com/dev-tools/bundles/variables.html)." }, "workspace": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Workspace" + "description": "The Databricks workspace for the target.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Workspace", + "markdownDescription": "The Databricks workspace for the target. 
[workspace](https://docs.databricks.com/dev-tools/bundles/reference.html#workspace)" } }, "additionalProperties": false @@ -1298,56 +1408,72 @@ ] }, "config.Workspace": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "artifact_path": { + "description": "The artifact path to use within the workspace for both deployments and workflow runs", "$ref": "#/$defs/string" }, "auth_type": { + "description": "The authentication type.", "$ref": "#/$defs/string" }, "azure_client_id": { + "description": "The Azure client ID", "$ref": "#/$defs/string" }, "azure_environment": { + "description": "The Azure environment", "$ref": "#/$defs/string" }, "azure_login_app_id": { + "description": "The Azure login app ID", "$ref": "#/$defs/string" }, "azure_tenant_id": { + "description": "The Azure tenant ID", "$ref": "#/$defs/string" }, "azure_use_msi": { + "description": "Whether to use MSI for Azure", "$ref": "#/$defs/bool" }, "azure_workspace_resource_id": { + "description": "The Azure workspace resource ID", "$ref": "#/$defs/string" }, "client_id": { + "description": "The client ID for the workspace", "$ref": "#/$defs/string" }, "file_path": { + "description": "The file path to use within the workspace for both deployments and workflow runs", "$ref": "#/$defs/string" }, "google_service_account": { + "description": "The Google service account name", "$ref": "#/$defs/string" }, "host": { + "description": "The Databricks workspace host URL", "$ref": "#/$defs/string" }, "profile": { + "description": "The Databricks workspace profile name", "$ref": "#/$defs/string" }, "resource_path": { + "description": "The workspace resource path", "$ref": "#/$defs/string" }, "root_path": { + "description": "The Databricks workspace root path", "$ref": "#/$defs/string" }, "state_path": { + "description": "The workspace state path", "$ref": "#/$defs/string" } }, @@ -1369,7 +1495,7 @@ "databricks-sdk-go": { "service": { "catalog.MonitorCronSchedule": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1407,7 +1533,7 @@ ] }, "catalog.MonitorDataClassificationConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1425,7 +1551,7 @@ ] }, "catalog.MonitorDestination": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1443,7 +1569,7 @@ ] }, "catalog.MonitorInferenceLog": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1500,7 +1626,7 @@ ] }, "catalog.MonitorMetric": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1550,7 +1676,7 @@ ] }, "catalog.MonitorNotifications": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1572,7 +1698,7 @@ ] }, "catalog.MonitorSnapshot": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": false @@ -1584,7 +1710,7 @@ ] }, "catalog.MonitorTimeSeries": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1617,7 +1743,7 @@ ] }, "compute.Adlsgen2Info": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1638,7 +1764,7 @@ ] }, "compute.AutoScale": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1660,7 +1786,7 @@ ] }, "compute.AwsAttributes": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1721,7 +1847,7 @@ ] }, "compute.AzureAttributes": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1759,7 +1885,7 @@ ] }, "compute.ClientsTypes": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1781,7 +1907,7 @@ ] }, "compute.ClusterLogConf": { - "anyOf": [ + "oneOf": [ { 
"type": "object", "properties": { @@ -1803,7 +1929,7 @@ ] }, "compute.ClusterSpec": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1873,6 +1999,13 @@ "description": "The optional ID of the instance pool to which the cluster belongs.", "$ref": "#/$defs/string" }, + "is_single_node": { + "description": "This field can only be used with `kind`.\n\nWhen set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`\n", + "$ref": "#/$defs/bool" + }, + "kind": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.Kind" + }, "node_type_id": { "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.\n", "$ref": "#/$defs/string" @@ -1908,6 +2041,10 @@ "description": "SSH public key contents that will be added to each Spark node in this cluster. The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.", "$ref": "#/$defs/slice/string" }, + "use_ml_runtime": { + "description": "This field can only be used with `kind`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.\n", + "$ref": "#/$defs/bool" + }, "workload_type": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.WorkloadType" } @@ -1922,8 +2059,11 @@ }, "compute.DataSecurityMode": { "type": "string", - "description": "Data security mode decides what data governance model to use when accessing data\nfrom a cluster.\n\n* `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode.\n* `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode.\n* `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. 
But programming languages and cluster features might be limited.\n\nThe following modes are deprecated starting with Databricks Runtime 15.0 and\nwill be removed for future Databricks Runtime versions:\n\n* `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters.\n* `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency clusters.\n* `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters.\n* `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled.\n", + "description": "Data security mode decides what data governance model to use when accessing data\nfrom a cluster.\n\nThe following modes can only be used with `kind`.\n* `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.\n* `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`.\n* `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.\n\nThe following modes can be used regardless of `kind`.\n* `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode.\n* `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode.\n* `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. But programming languages and cluster features might be limited.\n\nThe following modes are deprecated starting with Databricks Runtime 15.0 and\nwill be removed for future Databricks Runtime versions:\n\n* `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters.\n* `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency clusters.\n* `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters.\n* `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled.\n", "enum": [ + "DATA_SECURITY_MODE_AUTO", + "DATA_SECURITY_MODE_STANDARD", + "DATA_SECURITY_MODE_DEDICATED", "NONE", "SINGLE_USER", "USER_ISOLATION", @@ -1934,7 +2074,7 @@ ] }, "compute.DbfsStorageInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1955,7 +2095,7 @@ ] }, "compute.DockerBasicAuth": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1977,7 +2117,7 @@ ] }, "compute.DockerImage": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2006,7 +2146,7 @@ ] }, "compute.Environment": { - "anyOf": [ + "oneOf": [ { "type": "object", "description": "The environment entity used to preserve serverless environment side panel and jobs' environment for non-notebook task.\nIn this minimal environment spec, only pip dependencies are supported.", @@ -2032,7 +2172,7 @@ ] }, "compute.GcpAttributes": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2078,7 +2218,7 @@ ] }, "compute.GcsStorageInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2099,7 +2239,7 @@ ] }, "compute.InitScriptInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2140,8 +2280,11 @@ } ] }, + "compute.Kind": { + "type": "string" + }, "compute.Library": { - "anyOf": [ + 
"oneOf": [ { "type": "object", "properties": { @@ -2183,7 +2326,7 @@ ] }, "compute.LocalFileInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2204,7 +2347,7 @@ ] }, "compute.LogAnalyticsInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2226,7 +2369,7 @@ ] }, "compute.MavenLibrary": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2255,7 +2398,7 @@ ] }, "compute.PythonPyPiLibrary": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2280,7 +2423,7 @@ ] }, "compute.RCranLibrary": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2314,7 +2457,7 @@ ] }, "compute.S3StorageInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2359,7 +2502,7 @@ ] }, "compute.VolumesStorageInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2380,7 +2523,7 @@ ] }, "compute.WorkloadType": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2401,7 +2544,7 @@ ] }, "compute.WorkspaceStorageInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2428,6 +2571,40 @@ "TRASHED" ] }, + "jobs.CleanRoomsNotebookTask": { + "oneOf": [ + { + "type": "object", + "properties": { + "clean_room_name": { + "description": "The clean room that the notebook belongs to.", + "$ref": "#/$defs/string" + }, + "etag": { + "description": "Checksum to validate the freshness of the notebook resource (i.e. the notebook being run is the latest version).\nIt can be fetched by calling the :method:cleanroomassets/get API.", + "$ref": "#/$defs/string" + }, + "notebook_base_parameters": { + "description": "Base parameters to be used for the clean room notebook job.", + "$ref": "#/$defs/map/string" + }, + "notebook_name": { + "description": "Name of the notebook being run.", + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false, + "required": [ + "clean_room_name", + "notebook_name" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "jobs.Condition": { "type": "string", "enum": [ @@ -2436,7 +2613,7 @@ ] }, "jobs.ConditionTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2479,7 +2656,7 @@ ] }, "jobs.Continuous": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2497,7 +2674,7 @@ ] }, "jobs.CronSchedule": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2527,7 +2704,7 @@ ] }, "jobs.DbtTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2572,7 +2749,7 @@ ] }, "jobs.FileArrivalTriggerConfiguration": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2601,7 +2778,7 @@ ] }, "jobs.ForEachTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2651,7 +2828,7 @@ ] }, "jobs.GitSnapshot": { - "anyOf": [ + "oneOf": [ { "type": "object", "description": "Read-only state of the remote repository at the time the job was run. This field is only included on job runs.", @@ -2670,7 +2847,7 @@ ] }, "jobs.GitSource": { - "anyOf": [ + "oneOf": [ { "type": "object", "description": "An optional specification for a remote Git repository containing the source code used by tasks. Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks.\n\nIf `git_source` is set, these tasks retrieve the file from the remote repository by default. 
However, this behavior can be overridden by setting `source` to `WORKSPACE` on the task.\n\nNote: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are used, `git_source` must be defined on the job.", @@ -2709,7 +2886,7 @@ ] }, "jobs.JobCluster": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2735,7 +2912,7 @@ ] }, "jobs.JobDeployment": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2775,7 +2952,7 @@ ] }, "jobs.JobEmailNotifications": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2813,7 +2990,7 @@ ] }, "jobs.JobEnvironment": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2837,7 +3014,7 @@ ] }, "jobs.JobNotificationSettings": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2859,7 +3036,7 @@ ] }, "jobs.JobParameterDefinition": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2885,10 +3062,10 @@ ] }, "jobs.JobRunAs": { - "anyOf": [ + "oneOf": [ { "type": "object", - "description": "Write-only setting. Specifies the user, service principal or group that the job/pipeline runs as. If not specified, the job/pipeline runs as the user who created the job/pipeline.\n\nEither `user_name` or `service_principal_name` should be specified. If not, an error is thrown.", + "description": "Write-only setting. Specifies the user or service principal that the job runs as. If not specified, the job runs as the user who created the job.\n\nEither `user_name` or `service_principal_name` should be specified. If not, an error is thrown.", "properties": { "service_principal_name": { "description": "Application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role.", @@ -2908,7 +3085,7 @@ ] }, "jobs.JobSource": { - "anyOf": [ + "oneOf": [ { "type": "object", "description": "The source of the job specification in the remote repository when the job is source controlled.", @@ -2948,7 +3125,7 @@ }, "jobs.JobsHealthMetric": { "type": "string", - "description": "Specifies the health metric that is being evaluated for a particular health rule.\n\n* `RUN_DURATION_SECONDS`: Expected total time for a run in seconds.\n* `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric is in Private Preview.\n* `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all streams. This metric is in Private Preview.\n* `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This metric is in Private Preview.\n* `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all streams. This metric is in Private Preview.", + "description": "Specifies the health metric that is being evaluated for a particular health rule.\n\n* `RUN_DURATION_SECONDS`: Expected total time for a run in seconds.\n* `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all streams. 
This metric is in Public Preview.", "enum": [ "RUN_DURATION_SECONDS", "STREAMING_BACKLOG_BYTES", @@ -2965,7 +3142,7 @@ ] }, "jobs.JobsHealthRule": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2994,7 +3171,7 @@ ] }, "jobs.JobsHealthRules": { - "anyOf": [ + "oneOf": [ { "type": "object", "description": "An optional set of health rules that can be defined for this job.", @@ -3012,7 +3189,7 @@ ] }, "jobs.NotebookTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3052,7 +3229,7 @@ ] }, "jobs.PeriodicTriggerConfiguration": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3086,7 +3263,7 @@ ] }, "jobs.PipelineParams": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3104,7 +3281,7 @@ ] }, "jobs.PipelineTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3129,7 +3306,7 @@ ] }, "jobs.PythonWheelTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3163,7 +3340,7 @@ ] }, "jobs.QueueSettings": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3196,7 +3373,7 @@ ] }, "jobs.RunJobTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3260,7 +3437,7 @@ ] }, "jobs.SparkJarTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3286,7 +3463,7 @@ ] }, "jobs.SparkPythonTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3315,7 +3492,7 @@ ] }, "jobs.SparkSubmitTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3333,7 +3510,7 @@ ] }, "jobs.SqlTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3374,7 +3551,7 @@ ] }, "jobs.SqlTaskAlert": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3403,7 +3580,7 @@ ] }, "jobs.SqlTaskDashboard": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3436,7 +3613,7 @@ ] }, "jobs.SqlTaskFile": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3461,7 +3638,7 @@ ] }, "jobs.SqlTaskQuery": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3482,7 +3659,7 @@ ] }, "jobs.SqlTaskSubscription": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3504,7 +3681,7 @@ ] }, "jobs.TableUpdateTriggerConfiguration": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3534,10 +3711,14 @@ ] }, "jobs.Task": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { + "clean_rooms_notebook_task": { + "description": "The task runs a [clean rooms](https://docs.databricks.com/en/clean-rooms/index.html) notebook\nwhen the `clean_rooms_notebook_task` field is present.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.CleanRoomsNotebookTask" + }, "condition_task": { "description": "The task evaluates a condition that can be used to control the execution of other tasks when the `condition_task` field is present.\nThe condition task does not require a cluster to execute and does not support retries or notifications.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.ConditionTask" @@ -3666,7 +3847,7 @@ ] }, "jobs.TaskDependency": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3691,7 +3872,7 @@ ] }, "jobs.TaskEmailNotifications": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3729,7 +3910,7 @@ ] }, "jobs.TaskNotificationSettings": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3755,7 +3936,7 @@ ] }, "jobs.TriggerSettings": { - "anyOf": [ + "oneOf": 
[ { "type": "object", "properties": { @@ -3788,7 +3969,7 @@ ] }, "jobs.Webhook": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3808,7 +3989,7 @@ ] }, "jobs.WebhookNotifications": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3842,7 +4023,7 @@ ] }, "ml.ExperimentTag": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3864,7 +4045,7 @@ ] }, "ml.ModelTag": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3886,7 +4067,7 @@ ] }, "ml.ModelVersion": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3961,7 +4142,7 @@ ] }, "ml.ModelVersionTag": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3983,7 +4164,7 @@ ] }, "pipelines.CronTrigger": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4010,7 +4191,7 @@ ] }, "pipelines.FileLibrary": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4028,7 +4209,7 @@ ] }, "pipelines.Filters": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4050,7 +4231,7 @@ ] }, "pipelines.IngestionConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4076,7 +4257,7 @@ ] }, "pipelines.IngestionGatewayPipelineDefinition": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4110,7 +4291,7 @@ ] }, "pipelines.IngestionPipelineDefinition": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4140,7 +4321,7 @@ ] }, "pipelines.ManualTrigger": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": false @@ -4152,7 +4333,7 @@ ] }, "pipelines.NotebookLibrary": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4170,7 +4351,7 @@ ] }, "pipelines.Notifications": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4192,7 +4373,7 @@ ] }, "pipelines.PipelineCluster": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4282,7 +4463,7 @@ ] }, "pipelines.PipelineClusterAutoscale": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4320,7 +4501,7 @@ ] }, "pipelines.PipelineDeployment": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4342,7 +4523,7 @@ ] }, "pipelines.PipelineLibrary": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4376,7 +4557,7 @@ ] }, "pipelines.PipelineTrigger": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4396,7 +4577,7 @@ ] }, "pipelines.ReportSpec": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4430,22 +4611,13 @@ ] }, "pipelines.RestartWindow": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "days_of_week": { "description": "Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour).\nIf not specified all days of the week will be used.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindowDaysOfWeek", - "enum": [ - "MONDAY", - "TUESDAY", - "WEDNESDAY", - "THURSDAY", - "FRIDAY", - "SATURDAY", - "SUNDAY" - ] + "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindowDaysOfWeek" }, "start_hour": { "description": "An integer between 0 and 23 denoting the start hour for the restart window in the 24-hour day.\nContinuous pipeline restart is triggered only within a five-hour window starting at this hour.", @@ -4468,10 +4640,20 @@ ] }, "pipelines.RestartWindowDaysOfWeek": { - "type": "string" + "type": "string", + "description": "Days of week in which the 
restart is allowed to happen (within a five-hour window starting at start_hour).\nIf not specified all days of the week will be used.", + "enum": [ + "MONDAY", + "TUESDAY", + "WEDNESDAY", + "THURSDAY", + "FRIDAY", + "SATURDAY", + "SUNDAY" + ] }, "pipelines.SchemaSpec": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4505,7 +4687,7 @@ ] }, "pipelines.TableSpec": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4547,7 +4729,7 @@ ] }, "pipelines.TableSpecificConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4585,14 +4767,16 @@ ] }, "serving.Ai21LabsConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "ai21labs_api_key": { + "description": "The Databricks secret key reference for an AI21 Labs API key. If you prefer to paste your API key directly, see `ai21labs_api_key_plaintext`. You must provide an API key using one of the following fields: `ai21labs_api_key` or `ai21labs_api_key_plaintext`.", "$ref": "#/$defs/string" }, "ai21labs_api_key_plaintext": { + "description": "An AI21 Labs API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `ai21labs_api_key`. You must provide an API key using one of the following fields: `ai21labs_api_key` or `ai21labs_api_key_plaintext`.", "$ref": "#/$defs/string" } }, @@ -4605,7 +4789,7 @@ ] }, "serving.AiGatewayConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4635,7 +4819,7 @@ ] }, "serving.AiGatewayGuardrailParameters": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4665,7 +4849,7 @@ ] }, "serving.AiGatewayGuardrailPiiBehavior": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4694,7 +4878,7 @@ ] }, "serving.AiGatewayGuardrails": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4716,7 +4900,7 @@ ] }, "serving.AiGatewayInferenceTableConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4746,7 +4930,7 @@ ] }, "serving.AiGatewayRateLimit": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4791,7 +4975,7 @@ ] }, "serving.AiGatewayUsageTrackingConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4809,7 +4993,7 @@ ] }, "serving.AmazonBedrockConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4861,7 +5045,7 @@ ] }, "serving.AnthropicConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4883,7 +5067,7 @@ ] }, "serving.AutoCaptureConfigInput": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4913,7 +5097,7 @@ ] }, "serving.CohereConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4939,7 +5123,7 @@ ] }, "serving.DatabricksModelServingConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4968,7 +5152,7 @@ ] }, "serving.EndpointCoreConfigInput": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4998,7 +5182,7 @@ ] }, "serving.EndpointTag": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5023,7 +5207,7 @@ ] }, "serving.ExternalModel": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5100,20 +5284,24 @@ ] }, "serving.GoogleCloudVertexAiConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "private_key": { + "description": "The Databricks secret key reference for a private key for the service account which has access to the Google Cloud Vertex AI Service. 
See [Best practices for managing service account keys](https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys). If you prefer to paste your API key directly, see `private_key_plaintext`. You must provide an API key using one of the following fields: `private_key` or `private_key_plaintext`", "$ref": "#/$defs/string" }, "private_key_plaintext": { + "description": "The private key for the service account which has access to the Google Cloud Vertex AI Service provided as a plaintext secret. See [Best practices for managing service account keys](https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys). If you prefer to reference your key using Databricks Secrets, see `private_key`. You must provide an API key using one of the following fields: `private_key` or `private_key_plaintext`.", "$ref": "#/$defs/string" }, "project_id": { + "description": "This is the Google Cloud project id that the service account is associated with.", "$ref": "#/$defs/string" }, "region": { + "description": "This is the region for the Google Cloud Vertex AI Service. See [supported regions](https://cloud.google.com/vertex-ai/docs/general/locations) for more details. Some models are only available in specific regions.", "$ref": "#/$defs/string" } }, @@ -5126,41 +5314,52 @@ ] }, "serving.OpenAiConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "microsoft_entra_client_id": { + "description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Client ID.\n", "$ref": "#/$defs/string" }, "microsoft_entra_client_secret": { + "description": "The Databricks secret key reference for a client secret used for Microsoft Entra ID authentication.\nIf you prefer to paste your client secret directly, see `microsoft_entra_client_secret_plaintext`.\nYou must provide an API key using one of the following fields: `microsoft_entra_client_secret` or `microsoft_entra_client_secret_plaintext`.\n", "$ref": "#/$defs/string" }, "microsoft_entra_client_secret_plaintext": { + "description": "The client secret used for Microsoft Entra ID authentication provided as a plaintext string.\nIf you prefer to reference your key using Databricks Secrets, see `microsoft_entra_client_secret`.\nYou must provide an API key using one of the following fields: `microsoft_entra_client_secret` or `microsoft_entra_client_secret_plaintext`.\n", "$ref": "#/$defs/string" }, "microsoft_entra_tenant_id": { + "description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Tenant ID.\n", "$ref": "#/$defs/string" }, "openai_api_base": { + "description": "This is a field to provide a customized base URl for the OpenAI API.\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\nFor other OpenAI API types, this field is optional, and if left unspecified, the standard OpenAI base URL is used.\n", "$ref": "#/$defs/string" }, "openai_api_key": { + "description": "The Databricks secret key reference for an OpenAI API key using the OpenAI or Azure service. If you prefer to paste your API key directly, see `openai_api_key_plaintext`. You must provide an API key using one of the following fields: `openai_api_key` or `openai_api_key_plaintext`.", "$ref": "#/$defs/string" }, "openai_api_key_plaintext": { + "description": "The OpenAI API key using the OpenAI or Azure service provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `openai_api_key`. 
You must provide an API key using one of the following fields: `openai_api_key` or `openai_api_key_plaintext`.", "$ref": "#/$defs/string" }, "openai_api_type": { + "description": "This is an optional field to specify the type of OpenAI API to use.\nFor Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security\naccess validation protocol. For access token validation, use azure. For authentication using Azure Active\nDirectory (Azure AD) use, azuread.\n", "$ref": "#/$defs/string" }, "openai_api_version": { + "description": "This is an optional field to specify the OpenAI API version.\nFor Azure OpenAI, this field is required, and is the version of the Azure OpenAI service to\nutilize, specified by a date.\n", "$ref": "#/$defs/string" }, "openai_deployment_name": { + "description": "This field is only required for Azure OpenAI and is the name of the deployment resource for the\nAzure OpenAI service.\n", "$ref": "#/$defs/string" }, "openai_organization": { + "description": "This is an optional field to specify the organization in OpenAI or Azure OpenAI.\n", "$ref": "#/$defs/string" } }, @@ -5173,14 +5372,16 @@ ] }, "serving.PaLmConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "palm_api_key": { + "description": "The Databricks secret key reference for a PaLM API key. If you prefer to paste your API key directly, see `palm_api_key_plaintext`. You must provide an API key using one of the following fields: `palm_api_key` or `palm_api_key_plaintext`.", "$ref": "#/$defs/string" }, "palm_api_key_plaintext": { + "description": "The PaLM API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `palm_api_key`. You must provide an API key using one of the following fields: `palm_api_key` or `palm_api_key_plaintext`.", "$ref": "#/$defs/string" } }, @@ -5193,7 +5394,7 @@ ] }, "serving.RateLimit": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5238,7 +5439,7 @@ ] }, "serving.Route": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5264,7 +5465,7 @@ ] }, "serving.ServedEntityInput": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5322,7 +5523,7 @@ ] }, "serving.ServedModelInput": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5401,7 +5602,7 @@ ] }, "serving.TrafficConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5423,7 +5624,7 @@ } }, "int": { - "anyOf": [ + "oneOf": [ { "type": "integer" }, @@ -5450,7 +5651,7 @@ ] }, "int64": { - "anyOf": [ + "oneOf": [ { "type": "integer" }, @@ -5484,7 +5685,7 @@ "bundle": { "config": { "resources.Cluster": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5498,7 +5699,7 @@ ] }, "resources.Dashboard": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5512,7 +5713,7 @@ ] }, "resources.Job": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5526,7 +5727,7 @@ ] }, "resources.MlflowExperiment": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5540,7 +5741,7 @@ ] }, "resources.MlflowModel": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5554,7 +5755,7 @@ ] }, "resources.ModelServingEndpoint": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5568,7 +5769,7 @@ ] }, "resources.Pipeline": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5582,7 +5783,7 @@ ] }, 
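The descriptions added to the serving provider configs above (for example `serving.OpenAiConfig` and `serving.GoogleCloudVertexAiConfig`) distinguish Databricks secret-key references from their `*_plaintext` counterparts. As a rough sketch of what these definitions accept, here is a bundle `model_serving_endpoints` entry that supplies the key as a secret reference; it is shown as JSON to mirror the schema (bundle configuration is normally YAML), and the endpoint, entity, model, and secret names are illustrative only, not taken from this change:

```json
{
  "resources": {
    "model_serving_endpoints": {
      "chat_endpoint": {
        "name": "chat-endpoint",
        "config": {
          "served_entities": [
            {
              "name": "gpt-served-entity",
              "external_model": {
                "provider": "openai",
                "name": "gpt-4o",
                "task": "llm/v1/chat",
                "openai_config": {
                  "openai_api_key": "{{secrets/llm/openai_api_key}}"
                }
              }
            }
          ]
        }
      }
    }
  }
}
```

Per the new field descriptions, exactly one of `openai_api_key` or `openai_api_key_plaintext` should be provided; the secret-reference form keeps the key out of the configuration file.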
"resources.QualityMonitor": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5596,7 +5797,7 @@ ] }, "resources.RegisteredModel": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5610,7 +5811,7 @@ ] }, "resources.Schema": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5624,7 +5825,7 @@ ] }, "resources.Volume": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5638,7 +5839,7 @@ ] }, "variable.TargetVariable": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5652,7 +5853,7 @@ ] }, "variable.Variable": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5667,7 +5868,7 @@ } }, "config.Artifact": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5681,7 +5882,7 @@ ] }, "config.Command": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5695,7 +5896,7 @@ ] }, "config.Target": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5713,7 +5914,7 @@ } }, "string": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5734,7 +5935,7 @@ "bundle": { "config": { "resources.Grant": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5748,7 +5949,7 @@ ] }, "resources.Permission": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5763,7 +5964,7 @@ } }, "config.ArtifactFile": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5781,7 +5982,7 @@ "databricks-sdk-go": { "service": { "catalog.MonitorMetric": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5795,7 +5996,7 @@ ] }, "compute.InitScriptInfo": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5809,7 +6010,7 @@ ] }, "compute.Library": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5823,7 +6024,7 @@ ] }, "jobs.JobCluster": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5837,7 +6038,7 @@ ] }, "jobs.JobEnvironment": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5851,7 +6052,7 @@ ] }, "jobs.JobParameterDefinition": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5865,7 +6066,7 @@ ] }, "jobs.JobsHealthRule": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5879,7 +6080,7 @@ ] }, "jobs.SqlTaskSubscription": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5893,7 +6094,7 @@ ] }, "jobs.Task": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5907,7 +6108,7 @@ ] }, "jobs.TaskDependency": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5921,7 +6122,7 @@ ] }, "jobs.Webhook": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5935,7 +6136,7 @@ ] }, "ml.ExperimentTag": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5949,7 +6150,7 @@ ] }, "ml.ModelTag": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5963,7 +6164,7 @@ ] }, "ml.ModelVersion": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5977,7 +6178,7 @@ ] }, "ml.ModelVersionTag": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5991,7 +6192,7 @@ ] }, "pipelines.IngestionConfig": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6005,7 +6206,7 @@ ] }, "pipelines.Notifications": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6019,7 +6220,7 @@ ] }, "pipelines.PipelineCluster": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6033,7 +6234,7 @@ ] }, 
"pipelines.PipelineLibrary": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6046,8 +6247,22 @@ } ] }, + "pipelines.RestartWindowDaysOfWeek": { + "oneOf": [ + { + "type": "array", + "items": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindowDaysOfWeek" + } + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "serving.AiGatewayRateLimit": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6061,7 +6276,7 @@ ] }, "serving.EndpointTag": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6075,7 +6290,7 @@ ] }, "serving.RateLimit": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6089,7 +6304,7 @@ ] }, "serving.Route": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6103,7 +6318,7 @@ ] }, "serving.ServedEntityInput": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6117,7 +6332,7 @@ ] }, "serving.ServedModelInput": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6135,7 +6350,7 @@ } }, "string": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6156,39 +6371,57 @@ "type": "object", "properties": { "artifacts": { + "description": "Defines the attributes to build an artifact", "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config.Artifact" }, "bundle": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Bundle" + "description": "The attributes of the bundle.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Bundle", + "markdownDescription": "The attributes of the bundle. See [bundle](https://docs.databricks.com/dev-tools/bundles/settings.html#bundle)" }, "experimental": { + "description": "Defines attributes for experimental features.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Experimental" }, "include": { - "$ref": "#/$defs/slice/string" + "description": "Specifies a list of path globs that contain configuration files to include within the bundle.", + "$ref": "#/$defs/slice/string", + "markdownDescription": "Specifies a list of path globs that contain configuration files to include within the bundle. See [include](https://docs.databricks.com/dev-tools/bundles/settings.html#include)" }, "permissions": { - "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission" + "description": "Defines the permissions to apply to experiments, jobs, pipelines, and models defined in the bundle", + "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission", + "markdownDescription": "Defines the permissions to apply to experiments, jobs, pipelines, and models defined in the bundle. See [permissions](https://docs.databricks.com/dev-tools/bundles/settings.html#permissions) and [link](https://docs.databricks.com/dev-tools/bundles/permissions.html)." }, "presets": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Presets" + "description": "Defines bundle deployment presets.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Presets", + "markdownDescription": "Defines bundle deployment presets. See [presets](https://docs.databricks.com/dev-tools/bundles/deployment-modes.html#presets)." 
}, "resources": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Resources" + "description": "Specifies information about the Databricks resources used by the bundle", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Resources", + "markdownDescription": "Specifies information about the Databricks resources used by the bundle. See [link](https://docs.databricks.com/dev-tools/bundles/resources.html)." }, "run_as": { + "description": "The identity to use to run the bundle.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.JobRunAs" }, "sync": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Sync" + "description": "The files and file paths to include or exclude in the bundle.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Sync", + "markdownDescription": "The files and file paths to include or exclude in the bundle. See [link](https://docs.databricks.com/dev-tools/bundles/)" }, "targets": { + "description": "Defines deployment targets for the bundle.", "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config.Target" }, "variables": { + "description": "A Map that defines the custom variables for the bundle, where each key is the name of the variable, and the value is a Map that defines the variable.", "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/variable.Variable" }, "workspace": { + "description": "Defines the Databricks workspace for the bundle.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Workspace" } }, diff --git a/bundle/tests/run_as_test.go b/bundle/tests/run_as_test.go index 920577146..113a6140b 100644 --- a/bundle/tests/run_as_test.go +++ b/bundle/tests/run_as_test.go @@ -93,7 +93,6 @@ func TestRunAsForAllowedWithTargetOverride(t *testing.T) { assert.Equal(t, ml.Model{Name: "skynet"}, *b.Config.Resources.Models["model_one"].Model) assert.Equal(t, catalog.CreateRegisteredModelRequest{Name: "skynet (in UC)"}, *b.Config.Resources.RegisteredModels["model_two"].CreateRegisteredModelRequest) assert.Equal(t, ml.Experiment{Name: "experiment_one"}, *b.Config.Resources.Experiments["experiment_one"].Experiment) - } func TestRunAsErrorForPipelines(t *testing.T) { @@ -220,7 +219,6 @@ func TestRunAsErrorNeitherUserOrSpSpecified(t *testing.T) { for _, tc := range tcases { t.Run(tc.name, func(t *testing.T) { - bundlePath := fmt.Sprintf("./run_as/not_allowed/neither_sp_nor_user/%s", tc.name) b := load(t, bundlePath) diff --git a/bundle/tests/suggest_target_test.go b/bundle/tests/suggest_target_test.go index 8fb130409..02905d779 100644 --- a/bundle/tests/suggest_target_test.go +++ b/bundle/tests/suggest_target_test.go @@ -1,22 +1,22 @@ package config_tests import ( - "path/filepath" + "context" "testing" - "github.com/databricks/cli/cmd/root" - assert "github.com/databricks/cli/libs/dyn/dynassert" - - "github.com/databricks/cli/internal" + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/stretchr/testify/require" ) func TestSuggestTargetIfWrongPassed(t *testing.T) { - t.Setenv("BUNDLE_ROOT", filepath.Join("target_overrides", "workspace")) - stdoutBytes, _, err := internal.RequireErrorRun(t, "bundle", "validate", "-e", "incorrect") - stdout := stdoutBytes.String() + b := load(t, "target_overrides/workspace") - assert.Error(t, root.ErrAlreadyPrinted, err) - assert.Contains(t, stdout, "Available targets:") - assert.Contains(t, stdout, "development") - assert.Contains(t, stdout, "staging") + ctx := context.Background() + diags := bundle.Apply(ctx, b, 
mutator.SelectTarget("incorrect")) + err := diags.Error() + require.Error(t, err) + require.Contains(t, err.Error(), "Available targets:") + require.Contains(t, err.Error(), "development") + require.Contains(t, err.Error(), "staging") } diff --git a/bundle/tests/variables_test.go b/bundle/tests/variables_test.go index 9451c5a04..37d488fad 100644 --- a/bundle/tests/variables_test.go +++ b/bundle/tests/variables_test.go @@ -151,7 +151,7 @@ func TestVariablesWithTargetLookupOverrides(t *testing.T) { } func TestVariableTargetOverrides(t *testing.T) { - var tcases = []struct { + tcases := []struct { targetName string pipelineName string pipelineContinuous bool diff --git a/bundle/trampoline/python_dbr_warning.go b/bundle/trampoline/python_dbr_warning.go index cf3e9aeb3..0318df7c9 100644 --- a/bundle/trampoline/python_dbr_warning.go +++ b/bundle/trampoline/python_dbr_warning.go @@ -14,8 +14,7 @@ import ( "golang.org/x/mod/semver" ) -type wrapperWarning struct { -} +type wrapperWarning struct{} func WrapperWarning() bundle.Mutator { return &wrapperWarning{} @@ -62,7 +61,6 @@ func hasIncompatibleWheelTasks(ctx context.Context, b *bundle.Bundle) bool { if task.ExistingClusterId != "" { version, err := getSparkVersionForCluster(ctx, b.WorkspaceClient(), task.ExistingClusterId) - // If there's error getting spark version for cluster, do not mark it as incompatible if err != nil { log.Warnf(ctx, "unable to get spark version for cluster %s, err: %s", task.ExistingClusterId, err.Error()) diff --git a/bundle/trampoline/python_wheel_test.go b/bundle/trampoline/python_wheel_test.go index 517be35e4..d75a3eca3 100644 --- a/bundle/trampoline/python_wheel_test.go +++ b/bundle/trampoline/python_wheel_test.go @@ -127,7 +127,8 @@ func TestNoPanicWithNoPythonWheelTasks(t *testing.T) { Tasks: []jobs.Task{ { TaskKey: "notebook_task", - NotebookTask: &jobs.NotebookTask{}}, + NotebookTask: &jobs.NotebookTask{}, + }, }, }, }, diff --git a/bundle/trampoline/trampoline.go b/bundle/trampoline/trampoline.go index 1dc1c4463..600ce3d9c 100644 --- a/bundle/trampoline/trampoline.go +++ b/bundle/trampoline/trampoline.go @@ -62,7 +62,7 @@ func (m *trampoline) generateNotebookWrapper(ctx context.Context, b *bundle.Bund notebookName := fmt.Sprintf("notebook_%s_%s", task.JobKey, task.Task.TaskKey) localNotebookPath := filepath.Join(internalDir, notebookName+".py") - err = os.MkdirAll(filepath.Dir(localNotebookPath), 0755) + err = os.MkdirAll(filepath.Dir(localNotebookPath), 0o755) if err != nil { return err } diff --git a/bundle/trampoline/trampoline_test.go b/bundle/trampoline/trampoline_test.go index 4682d8fa0..3c5d18570 100644 --- a/bundle/trampoline/trampoline_test.go +++ b/bundle/trampoline/trampoline_test.go @@ -52,7 +52,8 @@ func TestGenerateTrampoline(t *testing.T) { PythonWheelTask: &jobs.PythonWheelTask{ PackageName: "test", EntryPoint: "run", - }}, + }, + }, } b := &bundle.Bundle{ diff --git a/cmd/account/cmd.go b/cmd/account/cmd.go index 9b4bb8139..f34966fd9 100644 --- a/cmd/account/cmd.go +++ b/cmd/account/cmd.go @@ -11,6 +11,7 @@ import ( credentials "github.com/databricks/cli/cmd/account/credentials" custom_app_integration "github.com/databricks/cli/cmd/account/custom-app-integration" encryption_keys "github.com/databricks/cli/cmd/account/encryption-keys" + account_federation_policy "github.com/databricks/cli/cmd/account/federation-policy" account_groups "github.com/databricks/cli/cmd/account/groups" account_ip_access_lists "github.com/databricks/cli/cmd/account/ip-access-lists" log_delivery 
"github.com/databricks/cli/cmd/account/log-delivery" @@ -21,6 +22,7 @@ import ( o_auth_published_apps "github.com/databricks/cli/cmd/account/o-auth-published-apps" private_access "github.com/databricks/cli/cmd/account/private-access" published_app_integration "github.com/databricks/cli/cmd/account/published-app-integration" + service_principal_federation_policy "github.com/databricks/cli/cmd/account/service-principal-federation-policy" service_principal_secrets "github.com/databricks/cli/cmd/account/service-principal-secrets" account_service_principals "github.com/databricks/cli/cmd/account/service-principals" account_settings "github.com/databricks/cli/cmd/account/settings" @@ -44,6 +46,7 @@ func New() *cobra.Command { cmd.AddCommand(credentials.New()) cmd.AddCommand(custom_app_integration.New()) cmd.AddCommand(encryption_keys.New()) + cmd.AddCommand(account_federation_policy.New()) cmd.AddCommand(account_groups.New()) cmd.AddCommand(account_ip_access_lists.New()) cmd.AddCommand(log_delivery.New()) @@ -54,6 +57,7 @@ func New() *cobra.Command { cmd.AddCommand(o_auth_published_apps.New()) cmd.AddCommand(private_access.New()) cmd.AddCommand(published_app_integration.New()) + cmd.AddCommand(service_principal_federation_policy.New()) cmd.AddCommand(service_principal_secrets.New()) cmd.AddCommand(account_service_principals.New()) cmd.AddCommand(account_settings.New()) diff --git a/cmd/account/federation-policy/federation-policy.go b/cmd/account/federation-policy/federation-policy.go new file mode 100755 index 000000000..d78ac709a --- /dev/null +++ b/cmd/account/federation-policy/federation-policy.go @@ -0,0 +1,402 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package federation_policy + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/oauth2" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "federation-policy", + Short: `These APIs manage account federation policies.`, + Long: `These APIs manage account federation policies. + + Account federation policies allow users and service principals in your + Databricks account to securely access Databricks APIs using tokens from your + trusted identity providers (IdPs). + + With token federation, your users and service principals can exchange tokens + from your IdP for Databricks OAuth tokens, which can be used to access + Databricks APIs. Token federation eliminates the need to manage Databricks + secrets, and allows you to centralize management of token issuance policies in + your IdP. Databricks token federation is typically used in combination with + [SCIM], so users in your IdP are synchronized into your Databricks account. + + Token federation is configured in your Databricks account using an account + federation policy. An account federation policy specifies: * which IdP, or + issuer, your Databricks account should accept tokens from * how to determine + which Databricks user, or subject, a token is issued for + + To configure a federation policy, you provide the following: * The required + token __issuer__, as specified in the “iss” claim of your tokens. The + issuer is an https URL that identifies your IdP. 
* The allowed token + __audiences__, as specified in the “aud” claim of your tokens. This + identifier is intended to represent the recipient of the token. As long as the + audience in the token matches at least one audience in the policy, the token + is considered a match. If unspecified, the default value is your Databricks + account id. * The __subject claim__, which indicates which token claim + contains the Databricks username of the user the token was issued for. If + unspecified, the default value is “sub”. * Optionally, the public keys + used to validate the signature of your tokens, in JWKS format. If unspecified + (recommended), Databricks automatically fetches the public keys from your + issuer’s well known endpoint. Databricks strongly recommends relying on your + issuer’s well known endpoint for discovering public keys. + + An example federation policy is: issuer: "https://idp.mycompany.com/oidc" + audiences: ["databricks"] subject_claim: "sub" + + An example JWT token body that matches this policy and could be used to + authenticate to Databricks as user username@mycompany.com is: { "iss": + "https://idp.mycompany.com/oidc", "aud": "databricks", "sub": + "username@mycompany.com" } + + You may also need to configure your IdP to generate tokens for your users to + exchange with Databricks, if your users do not already have the ability to + generate tokens that are compatible with your federation policy. + + You do not need to configure an OAuth application in Databricks to use token + federation. + + [SCIM]: https://docs.databricks.com/admin/users-groups/scim/index.html`, + GroupID: "oauth2", + Annotations: map[string]string{ + "package": "oauth2", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var createOverrides []func( + *cobra.Command, + *oauth2.CreateAccountFederationPolicyRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq oauth2.CreateAccountFederationPolicyRequest + createReq.Policy = &oauth2.FederationPolicy{} + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createReq.Policy.Description, "description", createReq.Policy.Description, `Description of the federation policy.`) + cmd.Flags().StringVar(&createReq.Policy.Name, "name", createReq.Policy.Name, `Name of the federation policy.`) + // TODO: complex arg: oidc_policy + + cmd.Use = "create" + cmd.Short = `Create account federation policy.` + cmd.Long = `Create account federation policy.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createJson.Unmarshal(&createReq.Policy) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + + response, err := a.FederationPolicy.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *oauth2.DeleteAccountFederationPolicyRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq oauth2.DeleteAccountFederationPolicyRequest + + // TODO: short flags + + cmd.Use = "delete POLICY_ID" + cmd.Short = `Delete account federation policy.` + cmd.Long = `Delete account federation policy.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + deleteReq.PolicyId = args[0] + + err = a.FederationPolicy.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var getOverrides []func( + *cobra.Command, + *oauth2.GetAccountFederationPolicyRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq oauth2.GetAccountFederationPolicyRequest + + // TODO: short flags + + cmd.Use = "get POLICY_ID" + cmd.Short = `Get account federation policy.` + cmd.Long = `Get account federation policy.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + getReq.PolicyId = args[0] + + response, err := a.FederationPolicy.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *oauth2.ListAccountFederationPoliciesRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq oauth2.ListAccountFederationPoliciesRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list" + cmd.Short = `List account federation policies.` + cmd.Long = `List account federation policies.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + response := a.FederationPolicy.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var updateOverrides []func( + *cobra.Command, + *oauth2.UpdateAccountFederationPolicyRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq oauth2.UpdateAccountFederationPolicyRequest + updateReq.Policy = &oauth2.FederationPolicy{} + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateReq.Policy.Description, "description", updateReq.Policy.Description, `Description of the federation policy.`) + cmd.Flags().StringVar(&updateReq.Policy.Name, "name", updateReq.Policy.Name, `Name of the federation policy.`) + // TODO: complex arg: oidc_policy + + cmd.Use = "update POLICY_ID UPDATE_MASK" + cmd.Short = `Update account federation policy.` + cmd.Long = `Update account federation policy. + + Arguments: + POLICY_ID: + UPDATE_MASK: Field mask is required to be passed into the PATCH request. Field mask + specifies which fields of the setting payload will be updated. The field + mask needs to be supplied as single string. To specify multiple fields in + the field mask, use comma as the separator (no space).` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq.Policy) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + updateReq.PolicyId = args[0] + updateReq.UpdateMask = args[1] + + response, err := a.FederationPolicy.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service AccountFederationPolicy diff --git a/cmd/account/service-principal-federation-policy/service-principal-federation-policy.go b/cmd/account/service-principal-federation-policy/service-principal-federation-policy.go new file mode 100755 index 000000000..77f73bcd0 --- /dev/null +++ b/cmd/account/service-principal-federation-policy/service-principal-federation-policy.go @@ -0,0 +1,445 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package service_principal_federation_policy + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/oauth2" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "service-principal-federation-policy", + Short: `These APIs manage service principal federation policies.`, + Long: `These APIs manage service principal federation policies. 
+ + Service principal federation, also known as Workload Identity Federation, + allows your automated workloads running outside of Databricks to securely + access Databricks APIs without the need for Databricks secrets. With Workload + Identity Federation, your application (or workload) authenticates to + Databricks as a Databricks service principal, using tokens provided by the + workload runtime. + + Databricks strongly recommends using Workload Identity Federation to + authenticate to Databricks from automated workloads, over alternatives such as + OAuth client secrets or Personal Access Tokens, whenever possible. Workload + Identity Federation is supported by many popular services, including Github + Actions, Azure DevOps, GitLab, Terraform Cloud, and Kubernetes clusters, among + others. + + Workload identity federation is configured in your Databricks account using a + service principal federation policy. A service principal federation policy + specifies: * which IdP, or issuer, the service principal is allowed to + authenticate from * which workload identity, or subject, is allowed to + authenticate as the Databricks service principal + + To configure a federation policy, you provide the following: * The required + token __issuer__, as specified in the “iss” claim of workload identity + tokens. The issuer is an https URL that identifies the workload identity + provider. * The required token __subject__, as specified in the “sub” + claim of workload identity tokens. The subject uniquely identifies the + workload in the workload runtime environment. * The allowed token + __audiences__, as specified in the “aud” claim of workload identity + tokens. The audience is intended to represent the recipient of the token. As + long as the audience in the token matches at least one audience in the policy, + the token is considered a match. If unspecified, the default value is your + Databricks account id. * Optionally, the public keys used to validate the + signature of the workload identity tokens, in JWKS format. If unspecified + (recommended), Databricks automatically fetches the public keys from the + issuer’s well known endpoint. Databricks strongly recommends relying on the + issuer’s well known endpoint for discovering public keys. + + An example service principal federation policy, for a Github Actions workload, + is: issuer: "https://token.actions.githubusercontent.com" audiences: + ["https://github.com/my-github-org"] subject: + "repo:my-github-org/my-repo:environment:prod" + + An example JWT token body that matches this policy and could be used to + authenticate to Databricks is: { "iss": + "https://token.actions.githubusercontent.com", "aud": + "https://github.com/my-github-org", "sub": + "repo:my-github-org/my-repo:environment:prod" } + + You may also need to configure the workload runtime to generate tokens for + your workloads. + + You do not need to configure an OAuth application in Databricks to use token + federation.`, + GroupID: "oauth2", + Annotations: map[string]string{ + "package": "oauth2", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. 
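// Editorial sketch, not generated code: the issuer / subject / audience matching rules described
// in the help text above, written out as a small self-contained predicate. The struct names are
// simplified stand-ins (not the SDK's oauth2 types); the example values are the GitHub Actions
// policy and JWT claims shown in the description, and the empty-audiences fallback to the account
// ID follows the same text.
package main

import "fmt"

// oidcPolicy mirrors the issuer / audiences / subject triple of a federation policy's oidc_policy.
type oidcPolicy struct {
	Issuer    string
	Audiences []string
	Subject   string
}

// tokenClaims holds the relevant claims of a workload identity token.
type tokenClaims struct {
	Iss string
	Aud string
	Sub string
}

// matches reports whether a token satisfies the policy: issuer and subject must match exactly,
// and the audience must match at least one allowed audience. If the policy lists no audiences,
// the Databricks account ID is used as the default.
func matches(p oidcPolicy, t tokenClaims, accountID string) bool {
	if t.Iss != p.Issuer || t.Sub != p.Subject {
		return false
	}
	audiences := p.Audiences
	if len(audiences) == 0 {
		audiences = []string{accountID}
	}
	for _, aud := range audiences {
		if t.Aud == aud {
			return true
		}
	}
	return false
}

func main() {
	p := oidcPolicy{
		Issuer:    "https://token.actions.githubusercontent.com",
		Audiences: []string{"https://github.com/my-github-org"},
		Subject:   "repo:my-github-org/my-repo:environment:prod",
	}
	t := tokenClaims{
		Iss: "https://token.actions.githubusercontent.com",
		Aud: "https://github.com/my-github-org",
		Sub: "repo:my-github-org/my-repo:environment:prod",
	}
	fmt.Println(matches(p, t, "my-account-id")) // true: the example token matches the example policy
}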
+// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *oauth2.CreateServicePrincipalFederationPolicyRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq oauth2.CreateServicePrincipalFederationPolicyRequest + createReq.Policy = &oauth2.FederationPolicy{} + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createReq.Policy.Description, "description", createReq.Policy.Description, `Description of the federation policy.`) + cmd.Flags().StringVar(&createReq.Policy.Name, "name", createReq.Policy.Name, `Name of the federation policy.`) + // TODO: complex arg: oidc_policy + + cmd.Use = "create SERVICE_PRINCIPAL_ID" + cmd.Short = `Create service principal federation policy.` + cmd.Long = `Create service principal federation policy. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal id for the federation policy.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createJson.Unmarshal(&createReq.Policy) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + _, err = fmt.Sscan(args[0], &createReq.ServicePrincipalId) + if err != nil { + return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0]) + } + + response, err := a.ServicePrincipalFederationPolicy.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *oauth2.DeleteServicePrincipalFederationPolicyRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq oauth2.DeleteServicePrincipalFederationPolicyRequest + + // TODO: short flags + + cmd.Use = "delete SERVICE_PRINCIPAL_ID POLICY_ID" + cmd.Short = `Delete service principal federation policy.` + cmd.Long = `Delete service principal federation policy. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal id for the federation policy. 
+ POLICY_ID: ` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + _, err = fmt.Sscan(args[0], &deleteReq.ServicePrincipalId) + if err != nil { + return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0]) + } + deleteReq.PolicyId = args[1] + + err = a.ServicePrincipalFederationPolicy.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *oauth2.GetServicePrincipalFederationPolicyRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq oauth2.GetServicePrincipalFederationPolicyRequest + + // TODO: short flags + + cmd.Use = "get SERVICE_PRINCIPAL_ID POLICY_ID" + cmd.Short = `Get service principal federation policy.` + cmd.Long = `Get service principal federation policy. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal id for the federation policy. + POLICY_ID: ` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + _, err = fmt.Sscan(args[0], &getReq.ServicePrincipalId) + if err != nil { + return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0]) + } + getReq.PolicyId = args[1] + + response, err := a.ServicePrincipalFederationPolicy.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *oauth2.ListServicePrincipalFederationPoliciesRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq oauth2.ListServicePrincipalFederationPoliciesRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list SERVICE_PRINCIPAL_ID" + cmd.Short = `List service principal federation policies.` + cmd.Long = `List service principal federation policies. 
+ + Arguments: + SERVICE_PRINCIPAL_ID: The service principal id for the federation policy.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + _, err = fmt.Sscan(args[0], &listReq.ServicePrincipalId) + if err != nil { + return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0]) + } + + response := a.ServicePrincipalFederationPolicy.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *oauth2.UpdateServicePrincipalFederationPolicyRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq oauth2.UpdateServicePrincipalFederationPolicyRequest + updateReq.Policy = &oauth2.FederationPolicy{} + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateReq.Policy.Description, "description", updateReq.Policy.Description, `Description of the federation policy.`) + cmd.Flags().StringVar(&updateReq.Policy.Name, "name", updateReq.Policy.Name, `Name of the federation policy.`) + // TODO: complex arg: oidc_policy + + cmd.Use = "update SERVICE_PRINCIPAL_ID POLICY_ID UPDATE_MASK" + cmd.Short = `Update service principal federation policy.` + cmd.Long = `Update service principal federation policy. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal id for the federation policy. + POLICY_ID: + UPDATE_MASK: Field mask is required to be passed into the PATCH request. Field mask + specifies which fields of the setting payload will be updated. The field + mask needs to be supplied as single string. To specify multiple fields in + the field mask, use comma as the separator (no space).` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq.Policy) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + _, err = fmt.Sscan(args[0], &updateReq.ServicePrincipalId) + if err != nil { + return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0]) + } + updateReq.PolicyId = args[1] + updateReq.UpdateMask = args[2] + + response, err := a.ServicePrincipalFederationPolicy.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. 
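// Editorial sketch, not generated code: what the generated "update" command above does, expressed
// directly against the Go SDK. Invoking the command would look roughly like
//
//	databricks account service-principal-federation-policy update 12345 my-policy description --description "..."
//
// The request and field names are taken from the command body above; the client constructor is
// the SDK's databricks.NewAccountClient, and the IDs, policy name and mask value are invented for
// illustration.
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/oauth2"
)

func main() {
	ctx := context.Background()
	a, err := databricks.NewAccountClient() // host, account id and credentials come from the environment/profile
	if err != nil {
		panic(err)
	}

	updated, err := a.ServicePrincipalFederationPolicy.Update(ctx, oauth2.UpdateServicePrincipalFederationPolicyRequest{
		ServicePrincipalId: 12345,         // numeric SERVICE_PRINCIPAL_ID, as parsed by fmt.Sscan above
		PolicyId:           "my-policy",   // POLICY_ID argument
		UpdateMask:         "description", // comma-separated field mask; only these fields are patched
		Policy: &oauth2.FederationPolicy{
			Description: "Rotated to the new GitHub environment",
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", updated)
}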
+ // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service ServicePrincipalFederationPolicy diff --git a/cmd/api/api.go b/cmd/api/api.go index d33939a52..c3a3eb0b6 100644 --- a/cmd/api/api.go +++ b/cmd/api/api.go @@ -39,7 +39,7 @@ func makeCommand(method string) *cobra.Command { Args: root.ExactArgs(1), Short: fmt.Sprintf("Perform %s request", method), RunE: func(cmd *cobra.Command, args []string) error { - var path = args[0] + path := args[0] var request any diags := payload.Unmarshal(&request) diff --git a/cmd/auth/describe.go b/cmd/auth/describe.go index 3a6e3d5d7..faaf64f8f 100644 --- a/cmd/auth/describe.go +++ b/cmd/auth/describe.go @@ -59,7 +59,6 @@ func newDescribeCommand() *cobra.Command { isAccount, err := root.MustAnyClient(cmd, args) return root.ConfigUsed(cmd.Context()), isAccount, err }) - if err != nil { return err } @@ -141,7 +140,10 @@ func render(ctx context.Context, cmd *cobra.Command, status *authStatus, templat if err != nil { return err } - cmd.OutOrStdout().Write(buf) + _, err = cmd.OutOrStdout().Write(buf) + if err != nil { + return err + } default: return fmt.Errorf("unknown output type %s", root.OutputType(cmd)) } diff --git a/cmd/auth/describe_test.go b/cmd/auth/describe_test.go index d0260abc7..7f5f900d4 100644 --- a/cmd/auth/describe_test.go +++ b/cmd/auth/describe_test.go @@ -31,7 +31,8 @@ func TestGetWorkspaceAuthStatus(t *testing.T) { cmd.Flags().String("host", "", "") cmd.Flags().String("profile", "", "") - cmd.Flag("profile").Value.Set("my-profile") + err := cmd.Flag("profile").Value.Set("my-profile") + require.NoError(t, err) cmd.Flag("profile").Changed = true cfg := &config.Config{ @@ -39,14 +40,16 @@ func TestGetWorkspaceAuthStatus(t *testing.T) { } m.WorkspaceClient.Config = cfg t.Setenv("DATABRICKS_AUTH_TYPE", "azure-cli") - config.ConfigAttributes.Configure(cfg) + err = config.ConfigAttributes.Configure(cfg) + require.NoError(t, err) status, err := getAuthStatus(cmd, []string{}, showSensitive, func(cmd *cobra.Command, args []string) (*config.Config, bool, error) { - config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ + err := config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ "host": "https://test.com", "token": "test-token", "auth_type": "azure-cli", }) + require.NoError(t, err) return cfg, false, nil }) require.NoError(t, err) @@ -81,7 +84,8 @@ func TestGetWorkspaceAuthStatusError(t *testing.T) { cmd.Flags().String("host", "", "") cmd.Flags().String("profile", "", "") - cmd.Flag("profile").Value.Set("my-profile") + err := cmd.Flag("profile").Value.Set("my-profile") + require.NoError(t, err) cmd.Flag("profile").Changed = true cfg := &config.Config{ @@ -89,10 +93,11 @@ func TestGetWorkspaceAuthStatusError(t *testing.T) { } m.WorkspaceClient.Config = cfg t.Setenv("DATABRICKS_AUTH_TYPE", "azure-cli") - config.ConfigAttributes.Configure(cfg) + err = config.ConfigAttributes.Configure(cfg) + require.NoError(t, err) status, err := getAuthStatus(cmd, []string{}, showSensitive, func(cmd *cobra.Command, args []string) (*config.Config, bool, error) { - config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ + err = config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ "host": "https://test.com", "token": "test-token", "auth_type": "azure-cli", @@ -128,7 +133,8 @@ func 
TestGetWorkspaceAuthStatusSensitive(t *testing.T) { cmd.Flags().String("host", "", "") cmd.Flags().String("profile", "", "") - cmd.Flag("profile").Value.Set("my-profile") + err := cmd.Flag("profile").Value.Set("my-profile") + require.NoError(t, err) cmd.Flag("profile").Changed = true cfg := &config.Config{ @@ -136,10 +142,11 @@ func TestGetWorkspaceAuthStatusSensitive(t *testing.T) { } m.WorkspaceClient.Config = cfg t.Setenv("DATABRICKS_AUTH_TYPE", "azure-cli") - config.ConfigAttributes.Configure(cfg) + err = config.ConfigAttributes.Configure(cfg) + require.NoError(t, err) status, err := getAuthStatus(cmd, []string{}, showSensitive, func(cmd *cobra.Command, args []string) (*config.Config, bool, error) { - config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ + err = config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ "host": "https://test.com", "token": "test-token", "auth_type": "azure-cli", @@ -171,7 +178,8 @@ func TestGetAccountAuthStatus(t *testing.T) { cmd.Flags().String("host", "", "") cmd.Flags().String("profile", "", "") - cmd.Flag("profile").Value.Set("my-profile") + err := cmd.Flag("profile").Value.Set("my-profile") + require.NoError(t, err) cmd.Flag("profile").Changed = true cfg := &config.Config{ @@ -179,13 +187,14 @@ func TestGetAccountAuthStatus(t *testing.T) { } m.AccountClient.Config = cfg t.Setenv("DATABRICKS_AUTH_TYPE", "azure-cli") - config.ConfigAttributes.Configure(cfg) + err = config.ConfigAttributes.Configure(cfg) + require.NoError(t, err) wsApi := m.GetMockWorkspacesAPI() wsApi.EXPECT().List(mock.Anything).Return(nil, nil) status, err := getAuthStatus(cmd, []string{}, showSensitive, func(cmd *cobra.Command, args []string) (*config.Config, bool, error) { - config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ + err = config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ "account_id": "test-account-id", "username": "test-user", "host": "https://test.com", diff --git a/cmd/auth/env.go b/cmd/auth/env.go index e72d15399..52b7cbbfd 100644 --- a/cmd/auth/env.go +++ b/cmd/auth/env.go @@ -138,7 +138,7 @@ func newEnvCommand() *cobra.Command { if err != nil { return err } - cmd.OutOrStdout().Write(raw) + _, _ = cmd.OutOrStdout().Write(raw) return nil } diff --git a/cmd/auth/login.go b/cmd/auth/login.go index 79b795468..c98676599 100644 --- a/cmd/auth/login.go +++ b/cmd/auth/login.go @@ -29,8 +29,10 @@ func promptForProfile(ctx context.Context, defaultValue string) (string, error) return prompt.Run() } -const minimalDbConnectVersion = "13.1" -const defaultTimeout = 1 * time.Hour +const ( + minimalDbConnectVersion = "13.1" + defaultTimeout = 1 * time.Hour +) func newLoginCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { defaultConfigPath := "~/.databrickscfg" diff --git a/cmd/auth/token.go b/cmd/auth/token.go index 3f9af43fa..fbf8b68f6 100644 --- a/cmd/auth/token.go +++ b/cmd/auth/token.go @@ -94,7 +94,7 @@ func newTokenCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { if err != nil { return err } - cmd.OutOrStdout().Write(raw) + _, _ = cmd.OutOrStdout().Write(raw) return nil } diff --git a/cmd/bundle/debug/terraform.go b/cmd/bundle/debug/terraform.go index 843ecac4e..c7d49ebb2 100644 --- a/cmd/bundle/debug/terraform.go +++ b/cmd/bundle/debug/terraform.go @@ -60,13 +60,13 @@ For more information about filesystem mirrors, see the Terraform documentation: } switch root.OutputType(cmd) { case flags.OutputText: - cmdio.Render(cmd.Context(), dependencies.Terraform) + _ = cmdio.Render(cmd.Context(), 
dependencies.Terraform) case flags.OutputJSON: buf, err := json.MarshalIndent(dependencies, "", " ") if err != nil { return err } - cmd.OutOrStdout().Write(buf) + _, _ = cmd.OutOrStdout().Write(buf) default: return fmt.Errorf("unknown output type %s", root.OutputType(cmd)) } diff --git a/cmd/bundle/generate/dashboard.go b/cmd/bundle/generate/dashboard.go index 4a538a293..f196bbe62 100644 --- a/cmd/bundle/generate/dashboard.go +++ b/cmd/bundle/generate/dashboard.go @@ -158,7 +158,7 @@ func (d *dashboard) saveSerializedDashboard(_ context.Context, b *bundle.Bundle, } // Make sure the output directory exists. - if err := os.MkdirAll(filepath.Dir(filename), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(filename), 0o755); err != nil { return err } @@ -183,7 +183,7 @@ func (d *dashboard) saveSerializedDashboard(_ context.Context, b *bundle.Bundle, } fmt.Printf("Writing dashboard to %q\n", rel) - return os.WriteFile(filename, data, 0644) + return os.WriteFile(filename, data, 0o644) } func (d *dashboard) saveConfiguration(ctx context.Context, b *bundle.Bundle, dashboard *dashboards.Dashboard, key string) error { @@ -210,7 +210,7 @@ func (d *dashboard) saveConfiguration(ctx context.Context, b *bundle.Bundle, das } // Make sure the output directory exists. - if err := os.MkdirAll(d.resourceDir, 0755); err != nil { + if err := os.MkdirAll(d.resourceDir, 0o755); err != nil { return err } diff --git a/cmd/bundle/generate/dashboard_test.go b/cmd/bundle/generate/dashboard_test.go index 6741e6a39..f1161950b 100644 --- a/cmd/bundle/generate/dashboard_test.go +++ b/cmd/bundle/generate/dashboard_test.go @@ -67,9 +67,10 @@ func TestDashboard_ExistingID_Nominal(t *testing.T) { ctx := bundle.Context(context.Background(), b) cmd := NewGenerateDashboardCommand() cmd.SetContext(ctx) - cmd.Flag("existing-id").Value.Set("f00dcafe") + err := cmd.Flag("existing-id").Value.Set("f00dcafe") + require.NoError(t, err) - err := cmd.RunE(cmd, []string{}) + err = cmd.RunE(cmd, []string{}) require.NoError(t, err) // Assert the contents of the generated configuration @@ -105,9 +106,10 @@ func TestDashboard_ExistingID_NotFound(t *testing.T) { ctx := bundle.Context(context.Background(), b) cmd := NewGenerateDashboardCommand() cmd.SetContext(ctx) - cmd.Flag("existing-id").Value.Set("f00dcafe") + err := cmd.Flag("existing-id").Value.Set("f00dcafe") + require.NoError(t, err) - err := cmd.RunE(cmd, []string{}) + err = cmd.RunE(cmd, []string{}) require.Error(t, err) } @@ -137,9 +139,10 @@ func TestDashboard_ExistingPath_Nominal(t *testing.T) { ctx := bundle.Context(context.Background(), b) cmd := NewGenerateDashboardCommand() cmd.SetContext(ctx) - cmd.Flag("existing-path").Value.Set("/path/to/dashboard") + err := cmd.Flag("existing-path").Value.Set("/path/to/dashboard") + require.NoError(t, err) - err := cmd.RunE(cmd, []string{}) + err = cmd.RunE(cmd, []string{}) require.NoError(t, err) // Assert the contents of the generated configuration @@ -175,8 +178,9 @@ func TestDashboard_ExistingPath_NotFound(t *testing.T) { ctx := bundle.Context(context.Background(), b) cmd := NewGenerateDashboardCommand() cmd.SetContext(ctx) - cmd.Flag("existing-path").Value.Set("/path/to/dashboard") + err := cmd.Flag("existing-path").Value.Set("/path/to/dashboard") + require.NoError(t, err) - err := cmd.RunE(cmd, []string{}) + err = cmd.RunE(cmd, []string{}) require.Error(t, err) } diff --git a/cmd/bundle/generate/generate_test.go b/cmd/bundle/generate/generate_test.go index bc1549e64..896b7de51 100644 --- a/cmd/bundle/generate/generate_test.go 
+++ b/cmd/bundle/generate/generate_test.go @@ -78,13 +78,13 @@ func TestGeneratePipelineCommand(t *testing.T) { workspaceApi.EXPECT().Download(mock.Anything, "/test/file.py", mock.Anything).Return(pyContent, nil) cmd.SetContext(bundle.Context(context.Background(), b)) - cmd.Flag("existing-pipeline-id").Value.Set("test-pipeline") + require.NoError(t, cmd.Flag("existing-pipeline-id").Value.Set("test-pipeline")) configDir := filepath.Join(root, "resources") - cmd.Flag("config-dir").Value.Set(configDir) + require.NoError(t, cmd.Flag("config-dir").Value.Set(configDir)) srcDir := filepath.Join(root, "src") - cmd.Flag("source-dir").Value.Set(srcDir) + require.NoError(t, cmd.Flag("source-dir").Value.Set(srcDir)) var key string cmd.Flags().StringVar(&key, "key", "test_pipeline", "") @@ -174,13 +174,13 @@ func TestGenerateJobCommand(t *testing.T) { workspaceApi.EXPECT().Download(mock.Anything, "/test/notebook", mock.Anything).Return(notebookContent, nil) cmd.SetContext(bundle.Context(context.Background(), b)) - cmd.Flag("existing-job-id").Value.Set("1234") + require.NoError(t, cmd.Flag("existing-job-id").Value.Set("1234")) configDir := filepath.Join(root, "resources") - cmd.Flag("config-dir").Value.Set(configDir) + require.NoError(t, cmd.Flag("config-dir").Value.Set(configDir)) srcDir := filepath.Join(root, "src") - cmd.Flag("source-dir").Value.Set(srcDir) + require.NoError(t, cmd.Flag("source-dir").Value.Set(srcDir)) var key string cmd.Flags().StringVar(&key, "key", "test_job", "") @@ -217,7 +217,7 @@ func TestGenerateJobCommand(t *testing.T) { } func touchEmptyFile(t *testing.T, path string) { - err := os.MkdirAll(filepath.Dir(path), 0700) + err := os.MkdirAll(filepath.Dir(path), 0o700) require.NoError(t, err) f, err := os.Create(path) require.NoError(t, err) @@ -279,13 +279,13 @@ func TestGenerateJobCommandOldFileRename(t *testing.T) { workspaceApi.EXPECT().Download(mock.Anything, "/test/notebook", mock.Anything).Return(notebookContent, nil) cmd.SetContext(bundle.Context(context.Background(), b)) - cmd.Flag("existing-job-id").Value.Set("1234") + require.NoError(t, cmd.Flag("existing-job-id").Value.Set("1234")) configDir := filepath.Join(root, "resources") - cmd.Flag("config-dir").Value.Set(configDir) + require.NoError(t, cmd.Flag("config-dir").Value.Set(configDir)) srcDir := filepath.Join(root, "src") - cmd.Flag("source-dir").Value.Set(srcDir) + require.NoError(t, cmd.Flag("source-dir").Value.Set(srcDir)) var key string cmd.Flags().StringVar(&key, "key", "test_job", "") @@ -295,7 +295,7 @@ func TestGenerateJobCommandOldFileRename(t *testing.T) { touchEmptyFile(t, oldFilename) // Having an existing files require --force flag to regenerate them - cmd.Flag("force").Value.Set("true") + require.NoError(t, cmd.Flag("force").Value.Set("true")) err := cmd.RunE(cmd, []string{}) require.NoError(t, err) diff --git a/cmd/bundle/generate/utils.go b/cmd/bundle/generate/utils.go index 65f692419..8e3764e35 100644 --- a/cmd/bundle/generate/utils.go +++ b/cmd/bundle/generate/utils.go @@ -87,7 +87,7 @@ func (n *downloader) markNotebookForDownload(ctx context.Context, notebookPath * } func (n *downloader) FlushToDisk(ctx context.Context, force bool) error { - err := os.MkdirAll(n.sourceDir, 0755) + err := os.MkdirAll(n.sourceDir, 0o755) if err != nil { return err } @@ -134,7 +134,7 @@ func (n *downloader) FlushToDisk(ctx context.Context, force bool) error { return errs.Wait() } -func newDownloader(w *databricks.WorkspaceClient, sourceDir string, configDir string) *downloader { +func newDownloader(w 
*databricks.WorkspaceClient, sourceDir, configDir string) *downloader { return &downloader{ files: make(map[string]string), w: w, diff --git a/cmd/bundle/run.go b/cmd/bundle/run.go index 7a92766d9..3bcebddd5 100644 --- a/cmd/bundle/run.go +++ b/cmd/bundle/run.go @@ -159,13 +159,19 @@ task or a Python wheel task, the second example applies. if err != nil { return err } - cmd.OutOrStdout().Write([]byte(resultString)) + _, err = cmd.OutOrStdout().Write([]byte(resultString)) + if err != nil { + return err + } case flags.OutputJSON: b, err := json.MarshalIndent(output, "", " ") if err != nil { return err } - cmd.OutOrStdout().Write(b) + _, err = cmd.OutOrStdout().Write(b) + if err != nil { + return err + } default: return fmt.Errorf("unknown output type %s", root.OutputType(cmd)) } diff --git a/cmd/bundle/summary.go b/cmd/bundle/summary.go index 8c34dd612..7c669c845 100644 --- a/cmd/bundle/summary.go +++ b/cmd/bundle/summary.go @@ -73,7 +73,7 @@ func newSummaryCommand() *cobra.Command { if err != nil { return err } - cmd.OutOrStdout().Write(buf) + _, _ = cmd.OutOrStdout().Write(buf) default: return fmt.Errorf("unknown output type %s", root.OutputType(cmd)) } diff --git a/cmd/bundle/validate.go b/cmd/bundle/validate.go index 5331e7e7b..3b50cc258 100644 --- a/cmd/bundle/validate.go +++ b/cmd/bundle/validate.go @@ -20,7 +20,7 @@ func renderJsonOutput(cmd *cobra.Command, b *bundle.Bundle, diags diag.Diagnosti if err != nil { return err } - cmd.OutOrStdout().Write(buf) + _, _ = cmd.OutOrStdout().Write(buf) return diags.Error() } diff --git a/cmd/configure/configure_test.go b/cmd/configure/configure_test.go index a127fe57a..e2f6c1e29 100644 --- a/cmd/configure/configure_test.go +++ b/cmd/configure/configure_test.go @@ -31,7 +31,7 @@ func setup(t *testing.T) string { return tempHomeDir } -func getTempFileWithContent(t *testing.T, tempHomeDir string, content string) *os.File { +func getTempFileWithContent(t *testing.T, tempHomeDir, content string) *os.File { inp, err := os.CreateTemp(tempHomeDir, "input") assert.NoError(t, err) _, err = inp.WriteString(content) @@ -75,7 +75,7 @@ func TestDefaultConfigureNoInteractive(t *testing.T) { } func TestConfigFileFromEnvNoInteractive(t *testing.T) { - //TODO: Replace with similar test code from go SDK, once we start using it directly + // TODO: Replace with similar test code from go SDK, once we start using it directly ctx := context.Background() tempHomeDir := setup(t) defaultCfgPath := filepath.Join(tempHomeDir, ".databrickscfg") diff --git a/cmd/labs/github/github.go b/cmd/labs/github/github.go index 1dd9fae5e..a67df1022 100644 --- a/cmd/labs/github/github.go +++ b/cmd/labs/github/github.go @@ -12,12 +12,16 @@ import ( "github.com/databricks/cli/libs/log" ) -const gitHubAPI = "https://api.github.com" -const gitHubUserContent = "https://raw.githubusercontent.com" +const ( + gitHubAPI = "https://api.github.com" + gitHubUserContent = "https://raw.githubusercontent.com" +) // Placeholders to use as unique keys in context.Context. 
-var apiOverride int -var userContentOverride int +var ( + apiOverride int + userContentOverride int +) func WithApiOverride(ctx context.Context, override string) context.Context { return context.WithValue(ctx, &apiOverride, override) diff --git a/cmd/labs/github/ref_test.go b/cmd/labs/github/ref_test.go index 2a9ffcc5b..cc27d1e81 100644 --- a/cmd/labs/github/ref_test.go +++ b/cmd/labs/github/ref_test.go @@ -7,12 +7,14 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestFileFromRef(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/databrickslabs/ucx/main/README.md" { - w.Write([]byte(`abc`)) + _, err := w.Write([]byte(`abc`)) + require.NoError(t, err) return } t.Logf("Requested: %s", r.URL.Path) @@ -31,7 +33,8 @@ func TestFileFromRef(t *testing.T) { func TestDownloadZipball(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/repos/databrickslabs/ucx/zipball/main" { - w.Write([]byte(`abc`)) + _, err := w.Write([]byte(`abc`)) + require.NoError(t, err) return } t.Logf("Requested: %s", r.URL.Path) diff --git a/cmd/labs/github/releases_test.go b/cmd/labs/github/releases_test.go index ea24a1e2e..9c3d7a959 100644 --- a/cmd/labs/github/releases_test.go +++ b/cmd/labs/github/releases_test.go @@ -7,12 +7,14 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestLoadsReleasesForCLI(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/repos/databricks/cli/releases" { - w.Write([]byte(`[{"tag_name": "v1.2.3"}, {"tag_name": "v1.2.2"}]`)) + _, err := w.Write([]byte(`[{"tag_name": "v1.2.3"}, {"tag_name": "v1.2.2"}]`)) + require.NoError(t, err) return } t.Logf("Requested: %s", r.URL.Path) diff --git a/cmd/labs/github/repositories_test.go b/cmd/labs/github/repositories_test.go index 4f2fef3e1..412b440bc 100644 --- a/cmd/labs/github/repositories_test.go +++ b/cmd/labs/github/repositories_test.go @@ -7,12 +7,14 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestRepositories(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/users/databrickslabs/repos" { - w.Write([]byte(`[{"name": "x"}]`)) + _, err := w.Write([]byte(`[{"name": "x"}]`)) + require.NoError(t, err) return } t.Logf("Requested: %s", r.URL.Path) diff --git a/cmd/labs/installed_test.go b/cmd/labs/installed_test.go index 00692f796..3c38e5e11 100644 --- a/cmd/labs/installed_test.go +++ b/cmd/labs/installed_test.go @@ -4,14 +4,14 @@ import ( "context" "testing" - "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/env" ) func TestListsInstalledProjects(t *testing.T) { ctx := context.Background() ctx = env.WithUserHomeDir(ctx, "project/testdata/installed-in-home") - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "installed") + r := testcli.NewRunner(t, ctx, "labs", "installed") r.RunAndExpectOutput(` Name Description Version blueprint Blueprint Project v0.3.15 diff --git a/cmd/labs/list_test.go b/cmd/labs/list_test.go index 925b984ab..4388fdd0e 100644 --- a/cmd/labs/list_test.go +++ b/cmd/labs/list_test.go @@ -4,7 +4,7 @@ import ( "context" "testing" - "github.com/databricks/cli/internal" + 
"github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/env" "github.com/stretchr/testify/require" ) @@ -12,7 +12,7 @@ import ( func TestListingWorks(t *testing.T) { ctx := context.Background() ctx = env.WithUserHomeDir(ctx, "project/testdata/installed-in-home") - c := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "list") + c := testcli.NewRunner(t, ctx, "labs", "list") stdout, _, err := c.Run() require.NoError(t, err) require.Contains(t, stdout.String(), "ucx") diff --git a/cmd/labs/localcache/jsonfile.go b/cmd/labs/localcache/jsonfile.go index 495743a57..6540e4ac2 100644 --- a/cmd/labs/localcache/jsonfile.go +++ b/cmd/labs/localcache/jsonfile.go @@ -14,8 +14,10 @@ import ( "github.com/databricks/cli/libs/log" ) -const userRW = 0o600 -const ownerRWXworldRX = 0o755 +const ( + userRW = 0o600 + ownerRWXworldRX = 0o755 +) func NewLocalCache[T any](dir, name string, validity time.Duration) LocalCache[T] { return LocalCache[T]{ diff --git a/cmd/labs/project/command_test.go b/cmd/labs/project/command_test.go index 20021879f..453329e1d 100644 --- a/cmd/labs/project/command_test.go +++ b/cmd/labs/project/command_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/python" "github.com/databricks/databricks-sdk-go" @@ -30,7 +30,7 @@ func devEnvContext(t *testing.T) context.Context { func TestRunningBlueprintEcho(t *testing.T) { ctx := devEnvContext(t) - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "blueprint", "echo") + r := testcli.NewRunner(t, ctx, "labs", "blueprint", "echo") var out echoOut r.RunAndParseJSON(&out) assert.Equal(t, "echo", out.Command) @@ -41,14 +41,14 @@ func TestRunningBlueprintEcho(t *testing.T) { func TestRunningBlueprintEchoProfileWrongOverride(t *testing.T) { ctx := devEnvContext(t) - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "blueprint", "echo", "--profile", "workspace-profile") + r := testcli.NewRunner(t, ctx, "labs", "blueprint", "echo", "--profile", "workspace-profile") _, _, err := r.Run() assert.ErrorIs(t, err, databricks.ErrNotAccountClient) } func TestRunningCommand(t *testing.T) { ctx := devEnvContext(t) - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "blueprint", "foo") + r := testcli.NewRunner(t, ctx, "labs", "blueprint", "foo") r.WithStdin() defer r.CloseStdin() @@ -60,7 +60,7 @@ func TestRunningCommand(t *testing.T) { func TestRenderingTable(t *testing.T) { ctx := devEnvContext(t) - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "blueprint", "table") + r := testcli.NewRunner(t, ctx, "labs", "blueprint", "table") r.RunAndExpectOutput(` Key Value First Second diff --git a/cmd/labs/project/entrypoint.go b/cmd/labs/project/entrypoint.go index 99edf83c8..2bed49145 100644 --- a/cmd/labs/project/entrypoint.go +++ b/cmd/labs/project/entrypoint.go @@ -30,10 +30,12 @@ type Entrypoint struct { IsBundleAware bool `yaml:"is_bundle_aware,omitempty"` } -var ErrNoLoginConfig = errors.New("no login configuration found") -var ErrMissingClusterID = errors.New("missing a cluster compatible with Databricks Connect") -var ErrMissingWarehouseID = errors.New("missing a SQL warehouse") -var ErrNotInTTY = errors.New("not in an interactive terminal") +var ( + ErrNoLoginConfig = errors.New("no login configuration found") + ErrMissingClusterID = errors.New("missing a cluster compatible with Databricks Connect") + ErrMissingWarehouseID = 
errors.New("missing a SQL warehouse") + ErrNotInTTY = errors.New("not in an interactive terminal") +) func (e *Entrypoint) NeedsCluster() bool { if e.Installer == nil { @@ -190,9 +192,6 @@ func (e *Entrypoint) getLoginConfig(cmd *cobra.Command) (*loginConfig, *config.C if isNoLoginConfig && !e.IsBundleAware { return nil, nil, ErrNoLoginConfig } - if !isNoLoginConfig && err != nil { - return nil, nil, fmt.Errorf("load: %w", err) - } if e.IsAccountLevel { log.Debugf(ctx, "Using account-level login profile: %s", lc.AccountProfile) cfg, err := e.envAwareConfigWithProfile(ctx, lc.AccountProfile) diff --git a/cmd/labs/project/installer_test.go b/cmd/labs/project/installer_test.go index 1e45fafe6..a69389b31 100644 --- a/cmd/labs/project/installer_test.go +++ b/cmd/labs/project/installer_test.go @@ -19,7 +19,7 @@ import ( "github.com/databricks/cli/cmd/labs/github" "github.com/databricks/cli/cmd/labs/project" - "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/process" "github.com/databricks/cli/libs/python" @@ -29,8 +29,10 @@ import ( "github.com/stretchr/testify/require" ) -const ownerRWXworldRX = 0o755 -const ownerRW = 0o600 +const ( + ownerRWXworldRX = 0o755 + ownerRW = 0o600 +) func zipballFromFolder(src string) ([]byte, error) { var buf bytes.Buffer @@ -117,10 +119,10 @@ func installerContext(t *testing.T, server *httptest.Server) context.Context { func respondWithJSON(t *testing.T, w http.ResponseWriter, v any) { raw, err := json.Marshal(v) - if err != nil { - require.NoError(t, err) - } - w.Write(raw) + require.NoError(t, err) + + _, err = w.Write(raw) + require.NoError(t, err) } type fileTree struct { @@ -167,19 +169,17 @@ func TestInstallerWorksForReleases(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/databrickslabs/blueprint/v0.3.15/labs.yml" { raw, err := os.ReadFile("testdata/installed-in-home/.databricks/labs/blueprint/lib/labs.yml") - if err != nil { - panic(err) - } - w.Write(raw) + require.NoError(t, err) + _, err = w.Write(raw) + require.NoError(t, err) return } if r.URL.Path == "/repos/databrickslabs/blueprint/zipball/v0.3.15" { raw, err := zipballFromFolder("testdata/installed-in-home/.databricks/labs/blueprint/lib") - if err != nil { - panic(err) - } + require.NoError(t, err) w.Header().Add("Content-Type", "application/octet-stream") - w.Write(raw) + _, err = w.Write(raw) + require.NoError(t, err) return } if r.URL.Path == "/api/2.1/clusters/get" { @@ -236,7 +236,7 @@ func TestInstallerWorksForReleases(t *testing.T) { // │ │ │ └── site-packages // │ │ │ ├── ... 
// │ │ │ ├── distutils-precedence.pth - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "install", "blueprint", "--debug") + r := testcli.NewRunner(t, ctx, "labs", "install", "blueprint", "--debug") r.RunAndExpectOutput("setting up important infrastructure") } @@ -314,7 +314,10 @@ func TestInstallerWorksForDevelopment(t *testing.T) { defer server.Close() wd, _ := os.Getwd() - defer os.Chdir(wd) + defer func() { + err := os.Chdir(wd) + require.NoError(t, err) + }() devDir := copyTestdata(t, "testdata/installed-in-home/.databricks/labs/blueprint/lib") err := os.Chdir(devDir) @@ -353,7 +356,7 @@ account_id = abc // └── databrickslabs-blueprint-releases.json // `databricks labs install .` means "verify this installer i'm developing does work" - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "install", ".") + r := testcli.NewRunner(t, ctx, "labs", "install", ".") r.WithStdin() defer r.CloseStdin() @@ -373,19 +376,17 @@ func TestUpgraderWorksForReleases(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/databrickslabs/blueprint/v0.4.0/labs.yml" { raw, err := os.ReadFile("testdata/installed-in-home/.databricks/labs/blueprint/lib/labs.yml") - if err != nil { - panic(err) - } - w.Write(raw) + require.NoError(t, err) + _, err = w.Write(raw) + require.NoError(t, err) return } if r.URL.Path == "/repos/databrickslabs/blueprint/zipball/v0.4.0" { raw, err := zipballFromFolder("testdata/installed-in-home/.databricks/labs/blueprint/lib") - if err != nil { - panic(err) - } + require.NoError(t, err) w.Header().Add("Content-Type", "application/octet-stream") - w.Write(raw) + _, err = w.Write(raw) + require.NoError(t, err) return } if r.URL.Path == "/api/2.1/clusters/get" { @@ -425,7 +426,7 @@ func TestUpgraderWorksForReleases(t *testing.T) { ctx = env.Set(ctx, "DATABRICKS_CLUSTER_ID", "installer-cluster") ctx = env.Set(ctx, "DATABRICKS_WAREHOUSE_ID", "installer-warehouse") - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "upgrade", "blueprint") + r := testcli.NewRunner(t, ctx, "labs", "upgrade", "blueprint") r.RunAndExpectOutput("setting up important infrastructure") // Check if the stub was called with the 'python -m pip install' command diff --git a/cmd/root/auth.go b/cmd/root/auth.go index 107679105..07ab48399 100644 --- a/cmd/root/auth.go +++ b/cmd/root/auth.go @@ -15,9 +15,11 @@ import ( ) // Placeholders to use as unique keys in context.Context. 
-var workspaceClient int -var accountClient int -var configUsed int +var ( + workspaceClient int + accountClient int + configUsed int +) type ErrNoWorkspaceProfiles struct { path string diff --git a/cmd/root/auth_test.go b/cmd/root/auth_test.go index 9ba2a8fa9..784598796 100644 --- a/cmd/root/auth_test.go +++ b/cmd/root/auth_test.go @@ -15,7 +15,8 @@ import ( ) func TestEmptyHttpRequest(t *testing.T) { - ctx, _ := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() req := emptyHttpRequest(ctx) assert.Equal(t, req.Context(), ctx) } @@ -83,7 +84,7 @@ func TestAccountClientOrPrompt(t *testing.T) { account_id = 1112 token = foobar `), - 0755) + 0o755) require.NoError(t, err) t.Setenv("DATABRICKS_CONFIG_FILE", configFile) t.Setenv("PATH", "/nothing") @@ -149,7 +150,7 @@ func TestWorkspaceClientOrPrompt(t *testing.T) { host = https://adb-1112.12.azuredatabricks.net/ token = foobar `), - 0755) + 0o755) require.NoError(t, err) t.Setenv("DATABRICKS_CONFIG_FILE", configFile) t.Setenv("PATH", "/nothing") @@ -203,7 +204,7 @@ func TestMustAccountClientWorksWithDatabricksCfg(t *testing.T) { account_id = 1111 token = foobar `), - 0755) + 0o755) require.NoError(t, err) cmd := New(context.Background()) @@ -250,7 +251,7 @@ func TestMustAnyClientCanCreateWorkspaceClient(t *testing.T) { host = https://adb-1111.11.azuredatabricks.net/ token = foobar `), - 0755) + 0o755) require.NoError(t, err) ctx, tt := cmdio.SetupTest(context.Background()) @@ -279,7 +280,7 @@ func TestMustAnyClientCanCreateAccountClient(t *testing.T) { account_id = 1111 token = foobar `), - 0755) + 0o755) require.NoError(t, err) ctx, tt := cmdio.SetupTest(context.Background()) @@ -303,7 +304,7 @@ func TestMustAnyClientWithEmptyDatabricksCfg(t *testing.T) { err := os.WriteFile( configFile, []byte(""), // empty file - 0755) + 0o755) require.NoError(t, err) ctx, tt := cmdio.SetupTest(context.Background()) diff --git a/cmd/root/bundle_test.go b/cmd/root/bundle_test.go index 301884287..1998b19e6 100644 --- a/cmd/root/bundle_test.go +++ b/cmd/root/bundle_test.go @@ -23,7 +23,7 @@ func setupDatabricksCfg(t *testing.T) { } cfg := []byte("[PROFILE-1]\nhost = https://a.com\ntoken = a\n[PROFILE-2]\nhost = https://a.com\ntoken = b\n") - err := os.WriteFile(filepath.Join(tempHomeDir, ".databrickscfg"), cfg, 0644) + err := os.WriteFile(filepath.Join(tempHomeDir, ".databrickscfg"), cfg, 0o644) assert.NoError(t, err) t.Setenv("DATABRICKS_CONFIG_FILE", "") @@ -48,7 +48,7 @@ func setupWithHost(t *testing.T, cmd *cobra.Command, host string) *bundle.Bundle workspace: host: %q `, host) - err := os.WriteFile(filepath.Join(rootPath, "databricks.yml"), []byte(contents), 0644) + err := os.WriteFile(filepath.Join(rootPath, "databricks.yml"), []byte(contents), 0o644) require.NoError(t, err) b, diags := MustConfigureBundle(cmd) @@ -66,7 +66,7 @@ func setupWithProfile(t *testing.T, cmd *cobra.Command, profile string) *bundle. 
workspace: profile: %q `, profile) - err := os.WriteFile(filepath.Join(rootPath, "databricks.yml"), []byte(contents), 0644) + err := os.WriteFile(filepath.Join(rootPath, "databricks.yml"), []byte(contents), 0o644) require.NoError(t, err) b, diags := MustConfigureBundle(cmd) @@ -99,10 +99,11 @@ func TestBundleConfigureWithNonExistentProfileFlag(t *testing.T) { testutil.CleanupEnvironment(t) cmd := emptyCommand(t) - cmd.Flag("profile").Value.Set("NOEXIST") + err := cmd.Flag("profile").Value.Set("NOEXIST") + require.NoError(t, err) b := setupWithHost(t, cmd, "https://x.com") - _, err := b.InitializeWorkspaceClient() + _, err = b.InitializeWorkspaceClient() assert.ErrorContains(t, err, "has no NOEXIST profile configured") } @@ -110,10 +111,11 @@ func TestBundleConfigureWithMismatchedProfile(t *testing.T) { testutil.CleanupEnvironment(t) cmd := emptyCommand(t) - cmd.Flag("profile").Value.Set("PROFILE-1") + err := cmd.Flag("profile").Value.Set("PROFILE-1") + require.NoError(t, err) b := setupWithHost(t, cmd, "https://x.com") - _, err := b.InitializeWorkspaceClient() + _, err = b.InitializeWorkspaceClient() assert.ErrorContains(t, err, "config host mismatch: profile uses host https://a.com, but CLI configured to use https://x.com") } @@ -121,7 +123,8 @@ func TestBundleConfigureWithCorrectProfile(t *testing.T) { testutil.CleanupEnvironment(t) cmd := emptyCommand(t) - cmd.Flag("profile").Value.Set("PROFILE-1") + err := cmd.Flag("profile").Value.Set("PROFILE-1") + require.NoError(t, err) b := setupWithHost(t, cmd, "https://a.com") client, err := b.InitializeWorkspaceClient() @@ -146,7 +149,8 @@ func TestBundleConfigureWithProfileFlagAndEnvVariable(t *testing.T) { t.Setenv("DATABRICKS_CONFIG_PROFILE", "NOEXIST") cmd := emptyCommand(t) - cmd.Flag("profile").Value.Set("PROFILE-1") + err := cmd.Flag("profile").Value.Set("PROFILE-1") + require.NoError(t, err) b := setupWithHost(t, cmd, "https://a.com") client, err := b.InitializeWorkspaceClient() @@ -174,7 +178,8 @@ func TestBundleConfigureProfileFlag(t *testing.T) { // The --profile flag takes precedence over the profile in the databricks.yml file cmd := emptyCommand(t) - cmd.Flag("profile").Value.Set("PROFILE-2") + err := cmd.Flag("profile").Value.Set("PROFILE-2") + require.NoError(t, err) b := setupWithProfile(t, cmd, "PROFILE-1") client, err := b.InitializeWorkspaceClient() @@ -205,7 +210,8 @@ func TestBundleConfigureProfileFlagAndEnvVariable(t *testing.T) { // The --profile flag takes precedence over the DATABRICKS_CONFIG_PROFILE environment variable t.Setenv("DATABRICKS_CONFIG_PROFILE", "NOEXIST") cmd := emptyCommand(t) - cmd.Flag("profile").Value.Set("PROFILE-2") + err := cmd.Flag("profile").Value.Set("PROFILE-2") + require.NoError(t, err) b := setupWithProfile(t, cmd, "PROFILE-1") client, err := b.InitializeWorkspaceClient() diff --git a/cmd/root/io.go b/cmd/root/io.go index b224bbb27..bba989a79 100644 --- a/cmd/root/io.go +++ b/cmd/root/io.go @@ -21,7 +21,7 @@ func initOutputFlag(cmd *cobra.Command) *outputFlag { // Configure defaults from environment, if applicable. // If the provided value is invalid it is ignored. 
if v, ok := env.Lookup(cmd.Context(), envOutputFormat); ok { - f.output.Set(v) + f.output.Set(v) //nolint:errcheck } cmd.PersistentFlags().VarP(&f.output, "output", "o", "output type: text or json") @@ -45,8 +45,9 @@ func (f *outputFlag) initializeIO(cmd *cobra.Command) error { headerTemplate = cmd.Annotations["headerTemplate"] } - cmdIO := cmdio.NewIO(f.output, cmd.InOrStdin(), cmd.OutOrStdout(), cmd.ErrOrStderr(), headerTemplate, template) - ctx := cmdio.InContext(cmd.Context(), cmdIO) + ctx := cmd.Context() + cmdIO := cmdio.NewIO(ctx, f.output, cmd.InOrStdin(), cmd.OutOrStdout(), cmd.ErrOrStderr(), headerTemplate, template) + ctx = cmdio.InContext(ctx, cmdIO) cmd.SetContext(ctx) return nil } diff --git a/cmd/root/logger.go b/cmd/root/logger.go index 48cb99a37..38e09b9c9 100644 --- a/cmd/root/logger.go +++ b/cmd/root/logger.go @@ -45,7 +45,10 @@ func (f *logFlags) makeLogHandler(opts slog.HandlerOptions) (slog.Handler, error func (f *logFlags) initializeContext(ctx context.Context) (context.Context, error) { if f.debug { - f.level.Set("debug") + err := f.level.Set("debug") + if err != nil { + return nil, err + } } opts := slog.HandlerOptions{} @@ -81,13 +84,13 @@ func initLogFlags(cmd *cobra.Command) *logFlags { // Configure defaults from environment, if applicable. // If the provided value is invalid it is ignored. if v, ok := env.Lookup(cmd.Context(), envLogFile); ok { - f.file.Set(v) + f.file.Set(v) //nolint:errcheck } if v, ok := env.Lookup(cmd.Context(), envLogLevel); ok { - f.level.Set(v) + f.level.Set(v) //nolint:errcheck } if v, ok := env.Lookup(cmd.Context(), envLogFormat); ok { - f.output.Set(v) + f.output.Set(v) //nolint:errcheck } flags := cmd.PersistentFlags() diff --git a/cmd/root/progress_logger.go b/cmd/root/progress_logger.go index 7d6a1fa46..1458de13a 100644 --- a/cmd/root/progress_logger.go +++ b/cmd/root/progress_logger.go @@ -59,7 +59,7 @@ func initProgressLoggerFlag(cmd *cobra.Command, logFlags *logFlags) *progressLog // Configure defaults from environment, if applicable. // If the provided value is invalid it is ignored. 
if v, ok := env.Lookup(cmd.Context(), envProgressFormat); ok { - f.Set(v) + _ = f.Set(v) } flags := cmd.PersistentFlags() diff --git a/cmd/root/progress_logger_test.go b/cmd/root/progress_logger_test.go index 9dceee8d5..42ba1bdc6 100644 --- a/cmd/root/progress_logger_test.go +++ b/cmd/root/progress_logger_test.go @@ -33,27 +33,27 @@ func initializeProgressLoggerTest(t *testing.T) ( func TestInitializeErrorOnIncompatibleConfig(t *testing.T) { plt, logLevel, logFile, progressFormat := initializeProgressLoggerTest(t) - logLevel.Set("info") - logFile.Set("stderr") - progressFormat.Set("inplace") + require.NoError(t, logLevel.Set("info")) + require.NoError(t, logFile.Set("stderr")) + require.NoError(t, progressFormat.Set("inplace")) _, err := plt.progressLoggerFlag.initializeContext(context.Background()) assert.ErrorContains(t, err, "inplace progress logging cannot be used when log-file is stderr") } func TestNoErrorOnDisabledLogLevel(t *testing.T) { plt, logLevel, logFile, progressFormat := initializeProgressLoggerTest(t) - logLevel.Set("disabled") - logFile.Set("stderr") - progressFormat.Set("inplace") + require.NoError(t, logLevel.Set("disabled")) + require.NoError(t, logFile.Set("stderr")) + require.NoError(t, progressFormat.Set("inplace")) _, err := plt.progressLoggerFlag.initializeContext(context.Background()) assert.NoError(t, err) } func TestNoErrorOnNonStderrLogFile(t *testing.T) { plt, logLevel, logFile, progressFormat := initializeProgressLoggerTest(t) - logLevel.Set("info") - logFile.Set("stdout") - progressFormat.Set("inplace") + require.NoError(t, logLevel.Set("info")) + require.NoError(t, logFile.Set("stdout")) + require.NoError(t, progressFormat.Set("inplace")) _, err := plt.progressLoggerFlag.initializeContext(context.Background()) assert.NoError(t, err) } diff --git a/cmd/root/root.go b/cmd/root/root.go index e6f66f126..3b37d0176 100644 --- a/cmd/root/root.go +++ b/cmd/root/root.go @@ -4,11 +4,10 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "strings" - "log/slog" - "github.com/databricks/cli/internal/build" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/dbr" diff --git a/cmd/root/user_agent_upstream.go b/cmd/root/user_agent_upstream.go index f580b4263..a813e8ee7 100644 --- a/cmd/root/user_agent_upstream.go +++ b/cmd/root/user_agent_upstream.go @@ -8,12 +8,16 @@ import ( ) // Environment variables that caller can set to convey what is upstream to this CLI. -const upstreamEnvVar = "DATABRICKS_CLI_UPSTREAM" -const upstreamVersionEnvVar = "DATABRICKS_CLI_UPSTREAM_VERSION" +const ( + upstreamEnvVar = "DATABRICKS_CLI_UPSTREAM" + upstreamVersionEnvVar = "DATABRICKS_CLI_UPSTREAM_VERSION" +) // Keys in the user agent. 
-const upstreamKey = "upstream" -const upstreamVersionKey = "upstream-version" +const ( + upstreamKey = "upstream" + upstreamVersionKey = "upstream-version" +) func withUpstreamInUserAgent(ctx context.Context) context.Context { value := env.Get(ctx, upstreamEnvVar) diff --git a/cmd/sync/sync.go b/cmd/sync/sync.go index 6d722fb08..cd2167a19 100644 --- a/cmd/sync/sync.go +++ b/cmd/sync/sync.go @@ -68,7 +68,6 @@ func (f *syncFlags) syncOptionsFromArgs(cmd *cobra.Command, args []string) (*syn localRoot := vfs.MustNew(args[0]) info, err := git.FetchRepositoryInfo(ctx, localRoot.Native(), client) - if err != nil { log.Warnf(ctx, "Failed to read git info: %s", err) } diff --git a/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go b/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go index b1adf6103..3f905e521 100755 --- a/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go +++ b/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go @@ -26,6 +26,7 @@ func New() *cobra.Command { } // Add methods + cmd.AddCommand(newDelete()) cmd.AddCommand(newGet()) cmd.AddCommand(newUpdate()) @@ -37,6 +38,62 @@ func New() *cobra.Command { return cmd } +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *settings.DeleteAibiDashboardEmbeddingAccessPolicySettingRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq settings.DeleteAibiDashboardEmbeddingAccessPolicySettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteReq.Etag, "etag", deleteReq.Etag, `etag used for versioning.`) + + cmd.Use = "delete" + cmd.Short = `Delete the AI/BI dashboard embedding access policy.` + cmd.Long = `Delete the AI/BI dashboard embedding access policy. + + Delete the AI/BI dashboard embedding access policy, reverting back to the + default.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.AibiDashboardEmbeddingAccessPolicy().Delete(ctx, deleteReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + // start get command // Slice with functions to override default command behavior. 
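// Editorial sketch, not generated code: the SDK-level equivalent of the "delete" command above,
// which reverts the AI/BI dashboard embedding access policy to its default. The accessor and
// request type are taken from the command body; Etag is optional and only used for versioning
// (the --etag flag above), so it is left empty here.
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/settings"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		panic(err)
	}

	resp, err := w.Settings.AibiDashboardEmbeddingAccessPolicy().Delete(ctx,
		settings.DeleteAibiDashboardEmbeddingAccessPolicySettingRequest{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", resp)
}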
diff --git a/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go b/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go index 481197460..69db66504 100755 --- a/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go +++ b/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go @@ -26,6 +26,7 @@ func New() *cobra.Command { } // Add methods + cmd.AddCommand(newDelete()) cmd.AddCommand(newGet()) cmd.AddCommand(newUpdate()) @@ -37,6 +38,62 @@ func New() *cobra.Command { return cmd } +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *settings.DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq settings.DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteReq.Etag, "etag", deleteReq.Etag, `etag used for versioning.`) + + cmd.Use = "delete" + cmd.Short = `Delete AI/BI dashboard embedding approved domains.` + cmd.Long = `Delete AI/BI dashboard embedding approved domains. + + Delete the list of domains approved to host embedded AI/BI dashboards, + reverting back to the default empty list.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.AibiDashboardEmbeddingApprovedDomains().Delete(ctx, deleteReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + // start get command // Slice with functions to override default command behavior. diff --git a/cmd/workspace/clean-room-assets/clean-room-assets.go b/cmd/workspace/clean-room-assets/clean-room-assets.go new file mode 100755 index 000000000..872f0ecef --- /dev/null +++ b/cmd/workspace/clean-room-assets/clean-room-assets.go @@ -0,0 +1,419 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package clean_room_assets + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/cleanrooms" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "clean-room-assets", + Short: `Clean room assets are data and code objects — Tables, volumes, and notebooks that are shared with the clean room.`, + Long: `Clean room assets are data and code objects — Tables, volumes, and notebooks + that are shared with the clean room.`, + GroupID: "cleanrooms", + Annotations: map[string]string{ + "package": "cleanrooms", + }, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *cleanrooms.CreateCleanRoomAssetRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq cleanrooms.CreateCleanRoomAssetRequest + createReq.Asset = &cleanrooms.CleanRoomAsset{} + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().Var(&createReq.Asset.AssetType, "asset-type", `The type of the asset. Supported values: [FOREIGN_TABLE, NOTEBOOK_FILE, TABLE, VIEW, VOLUME]`) + // TODO: complex arg: foreign_table + // TODO: complex arg: foreign_table_local_details + cmd.Flags().StringVar(&createReq.Asset.Name, "name", createReq.Asset.Name, `A fully qualified name that uniquely identifies the asset within the clean room.`) + // TODO: complex arg: notebook + // TODO: complex arg: table + // TODO: complex arg: table_local_details + // TODO: complex arg: view + // TODO: complex arg: view_local_details + // TODO: complex arg: volume_local_details + + cmd.Use = "create CLEAN_ROOM_NAME" + cmd.Short = `Create an asset.` + cmd.Long = `Create an asset. + + Create a clean room asset —share an asset like a notebook or table into the + clean room. For each UC asset that is added through this method, the clean + room owner must also have enough privilege on the asset to consume it. The + privilege must be maintained indefinitely for the clean room to be able to + access the asset. Typically, you should use a group as the clean room owner. + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createJson.Unmarshal(&createReq.Asset) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + createReq.CleanRoomName = args[0] + + response, err := w.CleanRoomAssets.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *cleanrooms.DeleteCleanRoomAssetRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq cleanrooms.DeleteCleanRoomAssetRequest + + // TODO: short flags + + cmd.Use = "delete CLEAN_ROOM_NAME ASSET_TYPE ASSET_FULL_NAME" + cmd.Short = `Delete an asset.` + cmd.Long = `Delete an asset. + + Delete a clean room asset - unshare/remove the asset from the clean room + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room. + ASSET_TYPE: The type of the asset. + ASSET_FULL_NAME: The fully qualified name of the asset, it is same as the name field in + CleanRoomAsset.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteReq.CleanRoomName = args[0] + _, err = fmt.Sscan(args[1], &deleteReq.AssetType) + if err != nil { + return fmt.Errorf("invalid ASSET_TYPE: %s", args[1]) + } + deleteReq.AssetFullName = args[2] + + err = w.CleanRoomAssets.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *cleanrooms.GetCleanRoomAssetRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq cleanrooms.GetCleanRoomAssetRequest + + // TODO: short flags + + cmd.Use = "get CLEAN_ROOM_NAME ASSET_TYPE ASSET_FULL_NAME" + cmd.Short = `Get an asset.` + cmd.Long = `Get an asset. + + Get the details of a clean room asset by its type and full name. + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room. + ASSET_TYPE: The type of the asset. + ASSET_FULL_NAME: The fully qualified name of the asset, it is same as the name field in + CleanRoomAsset.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getReq.CleanRoomName = args[0] + _, err = fmt.Sscan(args[1], &getReq.AssetType) + if err != nil { + return fmt.Errorf("invalid ASSET_TYPE: %s", args[1]) + } + getReq.AssetFullName = args[2] + + response, err := w.CleanRoomAssets.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. 
+ cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *cleanrooms.ListCleanRoomAssetsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq cleanrooms.ListCleanRoomAssetsRequest + + // TODO: short flags + + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) + + cmd.Use = "list CLEAN_ROOM_NAME" + cmd.Short = `List assets.` + cmd.Long = `List assets. + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listReq.CleanRoomName = args[0] + + response := w.CleanRoomAssets.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *cleanrooms.UpdateCleanRoomAssetRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq cleanrooms.UpdateCleanRoomAssetRequest + updateReq.Asset = &cleanrooms.CleanRoomAsset{} + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().Var(&updateReq.Asset.AssetType, "asset-type", `The type of the asset. Supported values: [FOREIGN_TABLE, NOTEBOOK_FILE, TABLE, VIEW, VOLUME]`) + // TODO: complex arg: foreign_table + // TODO: complex arg: foreign_table_local_details + cmd.Flags().StringVar(&updateReq.Asset.Name, "name", updateReq.Asset.Name, `A fully qualified name that uniquely identifies the asset within the clean room.`) + // TODO: complex arg: notebook + // TODO: complex arg: table + // TODO: complex arg: table_local_details + // TODO: complex arg: view + // TODO: complex arg: view_local_details + // TODO: complex arg: volume_local_details + + cmd.Use = "update CLEAN_ROOM_NAME ASSET_TYPE NAME" + cmd.Short = `Update an asset.` + cmd.Long = `Update an asset. + + Update a clean room asset. For example, updating the content of a notebook; + changing the shared partitions of a table; etc. + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room. + ASSET_TYPE: The type of the asset. + NAME: A fully qualified name that uniquely identifies the asset within the clean + room. This is also the name displayed in the clean room UI. 
+ + For UC securable assets (tables, volumes, etc.), the format is + *shared_catalog*.*shared_schema*.*asset_name* + + For notebooks, the name is the notebook file name.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq.Asset) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + updateReq.CleanRoomName = args[0] + _, err = fmt.Sscan(args[1], &updateReq.AssetType) + if err != nil { + return fmt.Errorf("invalid ASSET_TYPE: %s", args[1]) + } + updateReq.Name = args[2] + + response, err := w.CleanRoomAssets.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service CleanRoomAssets diff --git a/cmd/workspace/clean-room-task-runs/clean-room-task-runs.go b/cmd/workspace/clean-room-task-runs/clean-room-task-runs.go new file mode 100755 index 000000000..b41e380cc --- /dev/null +++ b/cmd/workspace/clean-room-task-runs/clean-room-task-runs.go @@ -0,0 +1,97 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package clean_room_task_runs + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/databricks-sdk-go/service/cleanrooms" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "clean-room-task-runs", + Short: `Clean room task runs are the executions of notebooks in a clean room.`, + Long: `Clean room task runs are the executions of notebooks in a clean room.`, + GroupID: "cleanrooms", + Annotations: map[string]string{ + "package": "cleanrooms", + }, + } + + // Add methods + cmd.AddCommand(newList()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
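The `list` command defined next wraps `w.CleanRoomTaskRuns.List` and streams the paginated result through `cmdio.RenderIterator`. For orientation, a standalone sketch of the same SDK call outside the CLI might look like this; authentication comes from the environment, the clean room name is a placeholder, and the iterator details assume the SDK's usual listing pattern:

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/cleanrooms"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// List historical notebook task runs in a clean room, page by page.
	it := w.CleanRoomTaskRuns.List(ctx, cleanrooms.ListCleanRoomNotebookTaskRunsRequest{
		CleanRoomName: "my-clean-room", // placeholder
	})
	for it.HasNext(ctx) {
		run, err := it.Next(ctx)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%+v\n", run)
	}
}
```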
+var listOverrides []func( + *cobra.Command, + *cleanrooms.ListCleanRoomNotebookTaskRunsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq cleanrooms.ListCleanRoomNotebookTaskRunsRequest + + // TODO: short flags + + cmd.Flags().StringVar(&listReq.NotebookName, "notebook-name", listReq.NotebookName, `Notebook name.`) + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `The maximum number of task runs to return.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) + + cmd.Use = "list CLEAN_ROOM_NAME" + cmd.Short = `List notebook task runs.` + cmd.Long = `List notebook task runs. + + List all the historical notebook task runs in a clean room. + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listReq.CleanRoomName = args[0] + + response := w.CleanRoomTaskRuns.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// end service CleanRoomTaskRuns diff --git a/cmd/workspace/clean-rooms/clean-rooms.go b/cmd/workspace/clean-rooms/clean-rooms.go new file mode 100755 index 000000000..053e41e8a --- /dev/null +++ b/cmd/workspace/clean-rooms/clean-rooms.go @@ -0,0 +1,450 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package clean_rooms + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/cleanrooms" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "clean-rooms", + Short: `A clean room uses Delta Sharing and serverless compute to provide a secure and privacy-protecting environment where multiple parties can work together on sensitive enterprise data without direct access to each other’s data.`, + Long: `A clean room uses Delta Sharing and serverless compute to provide a secure and + privacy-protecting environment where multiple parties can work together on + sensitive enterprise data without direct access to each other’s data.`, + GroupID: "cleanrooms", + Annotations: map[string]string{ + "package": "cleanrooms", + }, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newCreateOutputCatalog()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. 
+// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *cleanrooms.CreateCleanRoomRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq cleanrooms.CreateCleanRoomRequest + createReq.CleanRoom = &cleanrooms.CleanRoom{} + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createReq.CleanRoom.Comment, "comment", createReq.CleanRoom.Comment, ``) + cmd.Flags().StringVar(&createReq.CleanRoom.Name, "name", createReq.CleanRoom.Name, `The name of the clean room.`) + // TODO: complex arg: output_catalog + cmd.Flags().StringVar(&createReq.CleanRoom.Owner, "owner", createReq.CleanRoom.Owner, `This is Databricks username of the owner of the local clean room securable for permission management.`) + // TODO: complex arg: remote_detailed_info + + cmd.Use = "create" + cmd.Short = `Create a clean room.` + cmd.Long = `Create a clean room. + + Create a new clean room with the specified collaborators. This method is + asynchronous; the returned name field inside the clean_room field can be used + to poll the clean room status, using the :method:cleanrooms/get method. When + this method returns, the cluster will be in a PROVISIONING state. The cluster + will be usable once it enters an ACTIVE state. + + The caller must be a metastore admin or have the **CREATE_CLEAN_ROOM** + privilege on the metastore.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createJson.Unmarshal(&createReq.CleanRoom) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + + response, err := w.CleanRooms.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start create-output-catalog command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
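The `create` command shown above nests all user-supplied fields under `CreateCleanRoomRequest.CleanRoom` before calling `w.CleanRooms.Create`. A rough equivalent using the SDK directly could look like the following sketch; the package, names, and comment are placeholders, and only a subset of fields is populated:

```go
package cleanroomsketch // hypothetical package, for illustration only

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/cleanrooms"
)

// CreateDemoCleanRoom mirrors the request shape the generated
// `clean-rooms create` command assembles from its flags.
func CreateDemoCleanRoom(ctx context.Context, w *databricks.WorkspaceClient) error {
	req := cleanrooms.CreateCleanRoomRequest{
		CleanRoom: &cleanrooms.CleanRoom{
			Name:    "demo-clean-room",                     // placeholder name
			Comment: "created from a documentation sketch", // placeholder comment
		},
	}
	created, err := w.CleanRooms.Create(ctx, req)
	if err != nil {
		return err
	}
	// Creation is asynchronous: the help text above suggests polling with
	// `clean-rooms get` until the clean room leaves the PROVISIONING state.
	fmt.Printf("created: %+v\n", created)
	return nil
}
```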
+var createOutputCatalogOverrides []func( + *cobra.Command, + *cleanrooms.CreateCleanRoomOutputCatalogRequest, +) + +func newCreateOutputCatalog() *cobra.Command { + cmd := &cobra.Command{} + + var createOutputCatalogReq cleanrooms.CreateCleanRoomOutputCatalogRequest + createOutputCatalogReq.OutputCatalog = &cleanrooms.CleanRoomOutputCatalog{} + var createOutputCatalogJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createOutputCatalogJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createOutputCatalogReq.OutputCatalog.CatalogName, "catalog-name", createOutputCatalogReq.OutputCatalog.CatalogName, `The name of the output catalog in UC.`) + + cmd.Use = "create-output-catalog CLEAN_ROOM_NAME" + cmd.Short = `Create an output catalog.` + cmd.Long = `Create an output catalog. + + Create the output catalog of the clean room. + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createOutputCatalogJson.Unmarshal(&createOutputCatalogReq.OutputCatalog) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + createOutputCatalogReq.CleanRoomName = args[0] + + response, err := w.CleanRooms.CreateOutputCatalog(ctx, createOutputCatalogReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOutputCatalogOverrides { + fn(cmd, &createOutputCatalogReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *cleanrooms.DeleteCleanRoomRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq cleanrooms.DeleteCleanRoomRequest + + // TODO: short flags + + cmd.Use = "delete NAME" + cmd.Short = `Delete a clean room.` + cmd.Long = `Delete a clean room. + + Delete a clean room. After deletion, the clean room will be removed from the + metastore. If the other collaborators have not deleted the clean room, they + will still have the clean room in their metastore, but it will be in a DELETED + state and no operations other than deletion can be performed on it. + + Arguments: + NAME: Name of the clean room.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteReq.Name = args[0] + + err = w.CleanRooms.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. 
+ // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *cleanrooms.GetCleanRoomRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq cleanrooms.GetCleanRoomRequest + + // TODO: short flags + + cmd.Use = "get NAME" + cmd.Short = `Get a clean room.` + cmd.Long = `Get a clean room. + + Get the details of a clean room given its name.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getReq.Name = args[0] + + response, err := w.CleanRooms.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *cleanrooms.ListCleanRoomsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq cleanrooms.ListCleanRoomsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `Maximum number of clean rooms to return (i.e., the page length).`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) + + cmd.Use = "list" + cmd.Short = `List clean rooms.` + cmd.Long = `List clean rooms. + + Get a list of all clean rooms of the metastore. Only clean rooms the caller + has access to are returned.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.CleanRooms.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var updateOverrides []func( + *cobra.Command, + *cleanrooms.UpdateCleanRoomRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq cleanrooms.UpdateCleanRoomRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: clean_room + + cmd.Use = "update NAME" + cmd.Short = `Update a clean room.` + cmd.Long = `Update a clean room. + + Update a clean room. The caller must be the owner of the clean room, have + **MODIFY_CLEAN_ROOM** privilege, or be metastore admin. + + When the caller is a metastore admin, only the __owner__ field can be updated. + + Arguments: + NAME: Name of the clean room.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + updateReq.Name = args[0] + + response, err := w.CleanRooms.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service CleanRooms diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go index db788753b..bbb7c578a 100755 --- a/cmd/workspace/clusters/clusters.go +++ b/cmd/workspace/clusters/clusters.go @@ -204,6 +204,9 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.ClusterName, "cluster-name", createReq.ClusterName, `Cluster name requested by the user.`) // TODO: map via StringToStringVar: custom_tags cmd.Flags().Var(&createReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster. Supported values: [ + DATA_SECURITY_MODE_AUTO, + DATA_SECURITY_MODE_DEDICATED, + DATA_SECURITY_MODE_STANDARD, LEGACY_PASSTHROUGH, LEGACY_SINGLE_USER, LEGACY_SINGLE_USER_STANDARD, @@ -220,6 +223,8 @@ func newCreate() *cobra.Command { // TODO: complex arg: gcp_attributes // TODO: array: init_scripts cmd.Flags().StringVar(&createReq.InstancePoolId, "instance-pool-id", createReq.InstancePoolId, `The optional ID of the instance pool to which the cluster belongs.`) + cmd.Flags().BoolVar(&createReq.IsSingleNode, "is-single-node", createReq.IsSingleNode, `This field can only be used with kind.`) + cmd.Flags().Var(&createReq.Kind, "kind", `The kind of compute described by this compute specification. 
Supported values: [CLASSIC_PREVIEW]`) cmd.Flags().StringVar(&createReq.NodeTypeId, "node-type-id", createReq.NodeTypeId, `This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.`) cmd.Flags().IntVar(&createReq.NumWorkers, "num-workers", createReq.NumWorkers, `Number of worker nodes that this cluster should have.`) cmd.Flags().StringVar(&createReq.PolicyId, "policy-id", createReq.PolicyId, `The ID of the cluster policy used to create the cluster if applicable.`) @@ -228,6 +233,7 @@ func newCreate() *cobra.Command { // TODO: map via StringToStringVar: spark_conf // TODO: map via StringToStringVar: spark_env_vars // TODO: array: ssh_public_keys + cmd.Flags().BoolVar(&createReq.UseMlRuntime, "use-ml-runtime", createReq.UseMlRuntime, `This field can only be used with kind.`) // TODO: complex arg: workload_type cmd.Use = "create SPARK_VERSION" @@ -468,6 +474,9 @@ func newEdit() *cobra.Command { cmd.Flags().StringVar(&editReq.ClusterName, "cluster-name", editReq.ClusterName, `Cluster name requested by the user.`) // TODO: map via StringToStringVar: custom_tags cmd.Flags().Var(&editReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster. Supported values: [ + DATA_SECURITY_MODE_AUTO, + DATA_SECURITY_MODE_DEDICATED, + DATA_SECURITY_MODE_STANDARD, LEGACY_PASSTHROUGH, LEGACY_SINGLE_USER, LEGACY_SINGLE_USER_STANDARD, @@ -484,6 +493,8 @@ func newEdit() *cobra.Command { // TODO: complex arg: gcp_attributes // TODO: array: init_scripts cmd.Flags().StringVar(&editReq.InstancePoolId, "instance-pool-id", editReq.InstancePoolId, `The optional ID of the instance pool to which the cluster belongs.`) + cmd.Flags().BoolVar(&editReq.IsSingleNode, "is-single-node", editReq.IsSingleNode, `This field can only be used with kind.`) + cmd.Flags().Var(&editReq.Kind, "kind", `The kind of compute described by this compute specification. 
Supported values: [CLASSIC_PREVIEW]`) cmd.Flags().StringVar(&editReq.NodeTypeId, "node-type-id", editReq.NodeTypeId, `This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.`) cmd.Flags().IntVar(&editReq.NumWorkers, "num-workers", editReq.NumWorkers, `Number of worker nodes that this cluster should have.`) cmd.Flags().StringVar(&editReq.PolicyId, "policy-id", editReq.PolicyId, `The ID of the cluster policy used to create the cluster if applicable.`) @@ -492,6 +503,7 @@ func newEdit() *cobra.Command { // TODO: map via StringToStringVar: spark_conf // TODO: map via StringToStringVar: spark_env_vars // TODO: array: ssh_public_keys + cmd.Flags().BoolVar(&editReq.UseMlRuntime, "use-ml-runtime", editReq.UseMlRuntime, `This field can only be used with kind.`) // TODO: complex arg: workload_type cmd.Use = "edit CLUSTER_ID SPARK_VERSION" diff --git a/cmd/workspace/cmd.go b/cmd/workspace/cmd.go index 9cb3cca9e..f07d0cf76 100755 --- a/cmd/workspace/cmd.go +++ b/cmd/workspace/cmd.go @@ -8,6 +8,9 @@ import ( apps "github.com/databricks/cli/cmd/workspace/apps" artifact_allowlists "github.com/databricks/cli/cmd/workspace/artifact-allowlists" catalogs "github.com/databricks/cli/cmd/workspace/catalogs" + clean_room_assets "github.com/databricks/cli/cmd/workspace/clean-room-assets" + clean_room_task_runs "github.com/databricks/cli/cmd/workspace/clean-room-task-runs" + clean_rooms "github.com/databricks/cli/cmd/workspace/clean-rooms" cluster_policies "github.com/databricks/cli/cmd/workspace/cluster-policies" clusters "github.com/databricks/cli/cmd/workspace/clusters" connections "github.com/databricks/cli/cmd/workspace/connections" @@ -98,6 +101,9 @@ func All() []*cobra.Command { out = append(out, apps.New()) out = append(out, artifact_allowlists.New()) out = append(out, catalogs.New()) + out = append(out, clean_room_assets.New()) + out = append(out, clean_room_task_runs.New()) + out = append(out, clean_rooms.New()) out = append(out, cluster_policies.New()) out = append(out, clusters.New()) out = append(out, connections.New()) diff --git a/cmd/workspace/credentials/credentials.go b/cmd/workspace/credentials/credentials.go index 44ee0cf31..672a3aeec 100755 --- a/cmd/workspace/credentials/credentials.go +++ b/cmd/workspace/credentials/credentials.go @@ -27,7 +27,7 @@ func New() *cobra.Command { To create credentials, you must be a Databricks account admin or have the CREATE SERVICE CREDENTIAL privilege. The user who creates the credential can - delegate ownership to another user or group to manage permissions on it`, + delegate ownership to another user or group to manage permissions on it.`, GroupID: "catalog", Annotations: map[string]string{ "package": "catalog", @@ -73,7 +73,7 @@ func newCreateCredential() *cobra.Command { // TODO: complex arg: azure_managed_identity // TODO: complex arg: azure_service_principal cmd.Flags().StringVar(&createCredentialReq.Comment, "comment", createCredentialReq.Comment, `Comment associated with the credential.`) - // TODO: complex arg: gcp_service_account_key + // TODO: complex arg: databricks_gcp_service_account cmd.Flags().Var(&createCredentialReq.Purpose, "purpose", `Indicates the purpose of the credential. 
Supported values: [SERVICE, STORAGE]`) cmd.Flags().BoolVar(&createCredentialReq.ReadOnly, "read-only", createCredentialReq.ReadOnly, `Whether the credential is usable only for read operations.`) cmd.Flags().BoolVar(&createCredentialReq.SkipValidation, "skip-validation", createCredentialReq.SkipValidation, `Optional.`) @@ -227,6 +227,7 @@ func newGenerateTemporaryServiceCredential() *cobra.Command { cmd.Flags().Var(&generateTemporaryServiceCredentialJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: complex arg: azure_options + // TODO: complex arg: gcp_options cmd.Use = "generate-temporary-service-credential CREDENTIAL_NAME" cmd.Short = `Generate a temporary service credential.` @@ -434,6 +435,7 @@ func newUpdateCredential() *cobra.Command { // TODO: complex arg: azure_managed_identity // TODO: complex arg: azure_service_principal cmd.Flags().StringVar(&updateCredentialReq.Comment, "comment", updateCredentialReq.Comment, `Comment associated with the credential.`) + // TODO: complex arg: databricks_gcp_service_account cmd.Flags().BoolVar(&updateCredentialReq.Force, "force", updateCredentialReq.Force, `Force an update even if there are dependent services (when purpose is **SERVICE**) or dependent external locations and external tables (when purpose is **STORAGE**).`) cmd.Flags().Var(&updateCredentialReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`) cmd.Flags().StringVar(&updateCredentialReq.NewName, "new-name", updateCredentialReq.NewName, `New name of credential.`) diff --git a/cmd/workspace/groups.go b/cmd/workspace/groups.go index 98e474d33..8827682fa 100644 --- a/cmd/workspace/groups.go +++ b/cmd/workspace/groups.go @@ -72,5 +72,9 @@ func Groups() []cobra.Group { ID: "apps", Title: "Apps", }, + { + ID: "cleanrooms", + Title: "Clean Rooms", + }, } } diff --git a/cmd/workspace/lakeview/lakeview.go b/cmd/workspace/lakeview/lakeview.go index 35c3bdf4e..6686f16da 100755 --- a/cmd/workspace/lakeview/lakeview.go +++ b/cmd/workspace/lakeview/lakeview.go @@ -160,9 +160,6 @@ func newCreateSchedule() *cobra.Command { Arguments: DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -242,9 +239,6 @@ func newCreateSubscription() *cobra.Command { DASHBOARD_ID: UUID identifying the dashboard to which the subscription belongs. SCHEDULE_ID: UUID identifying the schedule to which the subscription belongs.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -322,9 +316,6 @@ func newDeleteSchedule() *cobra.Command { DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs. SCHEDULE_ID: UUID identifying the schedule.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -384,9 +375,6 @@ func newDeleteSubscription() *cobra.Command { SCHEDULE_ID: UUID identifying the schedule which the subscription belongs. SUBSCRIPTION_ID: UUID identifying the subscription.` - // This command is being previewed; hide from help output. 
- cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -562,9 +550,6 @@ func newGetSchedule() *cobra.Command { DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs. SCHEDULE_ID: UUID identifying the schedule.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -624,9 +609,6 @@ func newGetSubscription() *cobra.Command { SCHEDULE_ID: UUID identifying the schedule which the subscription belongs. SUBSCRIPTION_ID: UUID identifying the subscription.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -739,9 +721,6 @@ func newListSchedules() *cobra.Command { Arguments: DASHBOARD_ID: UUID identifying the dashboard to which the schedules belongs.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -798,9 +777,6 @@ func newListSubscriptions() *cobra.Command { DASHBOARD_ID: UUID identifying the dashboard which the subscriptions belongs. SCHEDULE_ID: UUID identifying the schedule which the subscriptions belongs.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -852,6 +828,7 @@ func newMigrate() *cobra.Command { cmd.Flags().StringVar(&migrateReq.DisplayName, "display-name", migrateReq.DisplayName, `Display name for the new Lakeview dashboard.`) cmd.Flags().StringVar(&migrateReq.ParentPath, "parent-path", migrateReq.ParentPath, `The workspace path of the folder to contain the migrated Lakeview dashboard.`) + cmd.Flags().BoolVar(&migrateReq.UpdateParameterSyntax, "update-parameter-syntax", migrateReq.UpdateParameterSyntax, `Flag to indicate if mustache parameter syntax ({{ param }}) should be auto-updated to named syntax (:param) when converting datasets in the dashboard.`) cmd.Use = "migrate SOURCE_DASHBOARD_ID" cmd.Short = `Migrate dashboard.` @@ -1215,9 +1192,6 @@ func newUpdateSchedule() *cobra.Command { DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs. SCHEDULE_ID: UUID identifying the schedule.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/workspace/workspace/export_dir.go b/cmd/workspace/workspace/export_dir.go index 0046f46ef..febe4c3e1 100644 --- a/cmd/workspace/workspace/export_dir.go +++ b/cmd/workspace/workspace/export_dir.go @@ -39,7 +39,7 @@ func (opts exportDirOptions) callback(ctx context.Context, workspaceFiler filer. 
// create directory and return early if d.IsDir() { - return os.MkdirAll(targetPath, 0755) + return os.MkdirAll(targetPath, 0o755) } // Add extension to local file path if the file is a notebook diff --git a/cmd/workspace/workspace/overrides.go b/cmd/workspace/workspace/overrides.go index cfed0a6ee..216e9b5d8 100644 --- a/cmd/workspace/workspace/overrides.go +++ b/cmd/workspace/workspace/overrides.go @@ -52,7 +52,7 @@ func exportOverride(exportCmd *cobra.Command, exportReq *workspace.ExportRequest if err != nil { return err } - return os.WriteFile(filePath, b, 0755) + return os.WriteFile(filePath, b, 0o755) } } @@ -88,7 +88,6 @@ func importOverride(importCmd *cobra.Command, importReq *workspace.Import) { err := originalRunE(cmd, args) return wrapImportAPIErrors(err, importReq) } - } func init() { diff --git a/go.mod b/go.mod index 7141ed768..2dda0cd60 100644 --- a/go.mod +++ b/go.mod @@ -2,19 +2,19 @@ module github.com/databricks/cli go 1.23 -toolchain go1.23.2 +toolchain go1.23.4 require ( github.com/Masterminds/semver/v3 v3.3.1 // MIT github.com/briandowns/spinner v1.23.1 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.52.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.54.0 // Apache 2.0 github.com/fatih/color v1.18.0 // MIT - github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause github.com/hashicorp/go-version v1.7.0 // MPL 2.0 github.com/hashicorp/hc-install v0.9.0 // MPL 2.0 github.com/hashicorp/terraform-exec v0.21.0 // MPL 2.0 github.com/hashicorp/terraform-json v0.23.0 // MPL 2.0 + github.com/hexops/gotextdiff v1.0.3 // BSD 3-Clause "New" or "Revised" License github.com/manifoldco/promptui v0.9.0 // BSD-3-Clause github.com/mattn/go-isatty v0.0.20 // MIT github.com/nwidger/jsoncolor v0.3.2 // MIT @@ -23,12 +23,13 @@ require ( github.com/spf13/cobra v1.8.1 // Apache 2.0 github.com/spf13/pflag v1.0.5 // BSD-3-Clause github.com/stretchr/testify v1.10.0 // MIT + github.com/wI2L/jsondiff v0.6.1 // MIT golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 golang.org/x/mod v0.22.0 golang.org/x/oauth2 v0.24.0 - golang.org/x/sync v0.9.0 - golang.org/x/term v0.26.0 - golang.org/x/text v0.20.0 + golang.org/x/sync v0.10.0 + golang.org/x/term v0.27.0 + golang.org/x/text v0.21.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 gopkg.in/yaml.v3 v3.0.1 ) @@ -56,19 +57,22 @@ require ( github.com/mattn/go-colorable v0.1.13 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/objx v0.5.2 // indirect + github.com/tidwall/gjson v1.18.0 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.1 // indirect + github.com/tidwall/sjson v1.2.5 // indirect github.com/zclconf/go-cty v1.15.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - golang.org/x/crypto v0.24.0 // indirect + golang.org/x/crypto v0.31.0 // indirect golang.org/x/net v0.26.0 // indirect - golang.org/x/sys v0.27.0 // indirect + golang.org/x/sys v0.28.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/api v0.182.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect google.golang.org/grpc v1.64.1 // indirect google.golang.org/protobuf v1.34.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index 
5d2c53a37..1e806ea03 100644 --- a/go.sum +++ b/go.sum @@ -32,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.52.0 h1:WKcj0F+pdx0gjI5xMicjYC4O43S2q5nyTpaGGMFmgHw= -github.com/databricks/databricks-sdk-go v0.52.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= +github.com/databricks/databricks-sdk-go v0.54.0 h1:L8gsA3NXs+uYU3QtW/OUgjxMQxOH24k0MT9JhB3zLlM= +github.com/databricks/databricks-sdk-go v0.54.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -48,8 +48,6 @@ github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= @@ -111,6 +109,8 @@ github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVW github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg= github.com/hashicorp/terraform-json v0.23.0 h1:sniCkExU4iKtTADReHzACkk8fnpQXrdD2xoR+lppBkI= github.com/hashicorp/terraform-json v0.23.0/go.mod h1:MHdXbBAbSg0GvzuWazEGKAn/cyNfIB7mN6y7KJN6y2c= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= @@ -158,6 +158,18 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod 
h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/wI2L/jsondiff v0.6.1 h1:ISZb9oNWbP64LHnu4AUhsMF5W0FIj5Ok3Krip9Shqpw= +github.com/wI2L/jsondiff v0.6.1/go.mod h1:KAEIojdQq66oJiHhDyQez2x+sRit0vIzC9KeK0yizxM= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/zclconf/go-cty v1.15.0 h1:tTCRWxsexYUmtt/wVxgDClUe+uQusuI443uL6e+5sXQ= @@ -176,8 +188,8 @@ go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= @@ -200,8 +212,8 @@ golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= -golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -212,14 +224,14 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= -golang.org/x/sys v0.27.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= -golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= -golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -263,8 +275,6 @@ gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/integration/README.md b/integration/README.md new file mode 100644 index 000000000..1c1d7c6f6 --- /dev/null +++ b/integration/README.md @@ -0,0 +1,37 @@ +# Integration tests + +This directory contains integration tests for the project. + +The tree structure generally mirrors the source code tree structure. + +Requirements for new files in this directory: +* Every package **must** be named after its directory with `_test` appended + * Requiring a different package name for integration tests avoids aliasing with the main package. +* Every integration test package **must** include a `main_test.go` file. + +These requirements are enforced by a unit test in this directory. + +## Running integration tests + +Integration tests require the following environment variables: +* `CLOUD_ENV` - set to the cloud environment to use (e.g. `aws`, `azure`, `gcp`) +* `DATABRICKS_HOST` - set to the Databricks workspace to use +* `DATABRICKS_TOKEN` - set to the Databricks token to use + +Optional environment variables: +* `TEST_DEFAULT_WAREHOUSE_ID` - set to the default warehouse ID to use +* `TEST_METASTORE_ID` - set to the metastore ID to use +* `TEST_INSTANCE_POOL_ID` - set to the instance pool ID to use +* `TEST_BRICKS_CLUSTER_ID` - set to the cluster ID to use + +To run all integration tests, use the following command: + +```bash +go test ./integration/... 
+``` + +Alternatively: + +```bash +make integration +``` diff --git a/internal/dashboard_assumptions_test.go b/integration/assumptions/dashboard_assumptions_test.go similarity index 89% rename from internal/dashboard_assumptions_test.go rename to integration/assumptions/dashboard_assumptions_test.go index 64294873d..3a1dcc907 100644 --- a/internal/dashboard_assumptions_test.go +++ b/integration/assumptions/dashboard_assumptions_test.go @@ -1,10 +1,11 @@ -package internal +package assumptions_test import ( "encoding/base64" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/dyn/convert" "github.com/databricks/cli/libs/dyn/merge" @@ -18,16 +19,16 @@ import ( // Verify that importing a dashboard through the Workspace API retains the identity of the underying resource, // as well as properties exclusively accessible through the dashboards API. -func TestAccDashboardAssumptions_WorkspaceImport(t *testing.T) { +func TestDashboardAssumptions_WorkspaceImport(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) t.Parallel() dashboardName := "New Dashboard" dashboardPayload := []byte(`{"pages":[{"name":"2506f97a","displayName":"New Page"}]}`) - warehouseId := acc.GetEnvOrSkipTest(t, "TEST_DEFAULT_WAREHOUSE_ID") + warehouseId := testutil.GetEnvOrSkipTest(t, "TEST_DEFAULT_WAREHOUSE_ID") - dir := wt.TemporaryWorkspaceDir("dashboard-assumptions-") + dir := acc.TemporaryWorkspaceDir(wt, "dashboard-assumptions-") dashboard, err := wt.W.Lakeview.Create(ctx, dashboards.CreateDashboardRequest{ Dashboard: &dashboards.Dashboard{ @@ -98,7 +99,7 @@ func TestAccDashboardAssumptions_WorkspaceImport(t *testing.T) { assert.Fail(t, "unexpected insert operation") return right, nil }, - VisitUpdate: func(basePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) { + VisitUpdate: func(basePath dyn.Path, left, right dyn.Value) (dyn.Value, error) { updatedFieldPaths = append(updatedFieldPaths, basePath.String()) return right, nil }, diff --git a/integration/assumptions/main_test.go b/integration/assumptions/main_test.go new file mode 100644 index 000000000..be2761385 --- /dev/null +++ b/integration/assumptions/main_test.go @@ -0,0 +1,13 @@ +package assumptions_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. 
+func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/bundle/artifacts_test.go b/integration/bundle/artifacts_test.go similarity index 84% rename from internal/bundle/artifacts_test.go rename to integration/bundle/artifacts_test.go index 34d101e4f..1b71a1c3d 100644 --- a/internal/bundle/artifacts_test.go +++ b/integration/bundle/artifacts_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "fmt" @@ -12,8 +12,10 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/bundle/libraries" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/env" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" @@ -23,21 +25,20 @@ import ( ) func touchEmptyFile(t *testing.T, path string) { - err := os.MkdirAll(filepath.Dir(path), 0700) + err := os.MkdirAll(filepath.Dir(path), 0o700) require.NoError(t, err) f, err := os.Create(path) require.NoError(t, err) f.Close() } -func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { +func TestUploadArtifactFileToCorrectRemotePath(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - w := wt.W dir := t.TempDir() whlPath := filepath.Join(dir, "dist", "test.whl") touchEmptyFile(t, whlPath) - wsDir := internal.TemporaryWorkspaceDir(t, w) + wsDir := acc.TemporaryWorkspaceDir(wt, "artifact-") b := &bundle.Bundle{ BundleRootPath: dir, @@ -95,14 +96,13 @@ func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { ) } -func TestAccUploadArtifactFileToCorrectRemotePathWithEnvironments(t *testing.T) { +func TestUploadArtifactFileToCorrectRemotePathWithEnvironments(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - w := wt.W dir := t.TempDir() whlPath := filepath.Join(dir, "dist", "test.whl") touchEmptyFile(t, whlPath) - wsDir := internal.TemporaryWorkspaceDir(t, w) + wsDir := acc.TemporaryWorkspaceDir(wt, "artifact-") b := &bundle.Bundle{ BundleRootPath: dir, @@ -160,15 +160,14 @@ func TestAccUploadArtifactFileToCorrectRemotePathWithEnvironments(t *testing.T) ) } -func TestAccUploadArtifactFileToCorrectRemotePathForVolumes(t *testing.T) { +func TestUploadArtifactFileToCorrectRemotePathForVolumes(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - w := wt.W if os.Getenv("TEST_METASTORE_ID") == "" { t.Skip("Skipping tests that require a UC Volume when metastore id is not set.") } - volumePath := internal.TemporaryUcVolume(t, w) + volumePath := acc.TemporaryVolume(wt) dir := t.TempDir() whlPath := filepath.Join(dir, "dist", "test.whl") @@ -230,11 +229,11 @@ func TestAccUploadArtifactFileToCorrectRemotePathForVolumes(t *testing.T) { ) } -func TestAccUploadArtifactFileToVolumeThatDoesNotExist(t *testing.T) { +func TestUploadArtifactFileToVolumeThatDoesNotExist(t *testing.T) { ctx, wt := acc.UcWorkspaceTest(t) w := wt.W - schemaName := internal.RandomName("schema-") + schemaName := testutil.RandomName("schema-") _, err := w.Schemas.Create(ctx, catalog.CreateSchema{ CatalogName: "main", @@ -248,15 +247,14 @@ func TestAccUploadArtifactFileToVolumeThatDoesNotExist(t *testing.T) { require.NoError(t, err) }) - bundleRoot, err := initTestTemplate(t, ctx, "artifact_path_with_volume", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, 
"artifact_path_with_volume", map[string]any{ "unique_id": uuid.New().String(), "schema_name": schemaName, "volume_name": "doesnotexist", }) - require.NoError(t, err) - t.Setenv("BUNDLE_ROOT", bundleRoot) - stdout, stderr, err := internal.RequireErrorRun(t, "bundle", "deploy") + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + stdout, stderr, err := testcli.RequireErrorRun(t, ctx, "bundle", "deploy") assert.Error(t, err) assert.Equal(t, fmt.Sprintf(`Error: volume /Volumes/main/%s/doesnotexist does not exist: Not Found @@ -267,11 +265,11 @@ func TestAccUploadArtifactFileToVolumeThatDoesNotExist(t *testing.T) { assert.Equal(t, "", stderr.String()) } -func TestAccUploadArtifactToVolumeNotYetDeployed(t *testing.T) { +func TestUploadArtifactToVolumeNotYetDeployed(t *testing.T) { ctx, wt := acc.UcWorkspaceTest(t) w := wt.W - schemaName := internal.RandomName("schema-") + schemaName := testutil.RandomName("schema-") _, err := w.Schemas.Create(ctx, catalog.CreateSchema{ CatalogName: "main", @@ -285,15 +283,14 @@ func TestAccUploadArtifactToVolumeNotYetDeployed(t *testing.T) { require.NoError(t, err) }) - bundleRoot, err := initTestTemplate(t, ctx, "artifact_path_with_volume", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "artifact_path_with_volume", map[string]any{ "unique_id": uuid.New().String(), "schema_name": schemaName, "volume_name": "my_volume", }) - require.NoError(t, err) - t.Setenv("BUNDLE_ROOT", bundleRoot) - stdout, stderr, err := internal.RequireErrorRun(t, "bundle", "deploy") + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + stdout, stderr, err := testcli.RequireErrorRun(t, ctx, "bundle", "deploy") assert.Error(t, err) assert.Equal(t, fmt.Sprintf(`Error: volume /Volumes/main/%s/my_volume does not exist: Not Found diff --git a/integration/bundle/basic_test.go b/integration/bundle/basic_test.go new file mode 100644 index 000000000..79301b850 --- /dev/null +++ b/integration/bundle/basic_test.go @@ -0,0 +1,37 @@ +package bundle_test + +import ( + "os" + "path/filepath" + "testing" + + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" + "github.com/google/uuid" + "github.com/stretchr/testify/require" +) + +func TestBasicBundleDeployWithFailOnActiveRuns(t *testing.T) { + ctx, _ := acc.WorkspaceTest(t) + + nodeTypeId := testutil.GetCloud(t).NodeTypeID() + uniqueId := uuid.New().String() + root := initTestTemplate(t, ctx, "basic", map[string]any{ + "unique_id": uniqueId, + "node_type_id": nodeTypeId, + "spark_version": defaultSparkVersion, + }) + + t.Cleanup(func() { + destroyBundle(t, ctx, root) + }) + + // deploy empty bundle + deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"}) + + // Remove .databricks directory to simulate a fresh deployment + require.NoError(t, os.RemoveAll(filepath.Join(root, ".databricks"))) + + // deploy empty bundle again + deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"}) +} diff --git a/internal/bundle/bind_resource_test.go b/integration/bundle/bind_resource_test.go similarity index 66% rename from internal/bundle/bind_resource_test.go rename to integration/bundle/bind_resource_test.go index 8cc5da536..508aa3410 100644 --- a/internal/bundle/bind_resource_test.go +++ b/integration/bundle/bind_resource_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "fmt" @@ -6,8 +6,10 @@ import ( "path/filepath" "testing" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" + 
"github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/env" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/google/uuid" @@ -15,39 +17,33 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccBindJobToExistingJob(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) - +func TestBindJobToExistingJob(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - gt := &generateJobTest{T: t, w: wt.W} + gt := &generateJobTest{T: wt, w: wt.W} - nodeTypeId := internal.GetNodeTypeId(env) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, "spark_version": "13.3.x-scala2.12", "node_type_id": nodeTypeId, }) - require.NoError(t, err) jobId := gt.createTestJob(ctx) t.Cleanup(func() { gt.destroyJob(ctx, jobId) - require.NoError(t, err) }) - t.Setenv("BUNDLE_ROOT", bundleRoot) - c := internal.NewCobraTestRunner(t, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId), "--auto-approve") - _, _, err = c.Run() + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId), "--auto-approve") + _, _, err := c.Run() require.NoError(t, err) // Remove .databricks directory to simulate a fresh deployment err = os.RemoveAll(filepath.Join(bundleRoot, ".databricks")) require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) w, err := databricks.NewWorkspaceClient() require.NoError(t, err) @@ -60,7 +56,7 @@ func TestAccBindJobToExistingJob(t *testing.T) { require.Equal(t, job.Settings.Name, fmt.Sprintf("test-job-basic-%s", uniqueId)) require.Contains(t, job.Settings.Tasks[0].SparkPythonTask.PythonFile, "hello_world.py") - c = internal.NewCobraTestRunner(t, "bundle", "deployment", "unbind", "foo") + c = testcli.NewRunner(t, ctx, "bundle", "deployment", "unbind", "foo") _, _, err = c.Run() require.NoError(t, err) @@ -68,8 +64,7 @@ func TestAccBindJobToExistingJob(t *testing.T) { err = os.RemoveAll(filepath.Join(bundleRoot, ".databricks")) require.NoError(t, err) - err = destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) // Check that job is unbound and exists after bundle is destroyed job, err = w.Jobs.Get(ctx, jobs.GetJobRequest{ @@ -80,21 +75,17 @@ func TestAccBindJobToExistingJob(t *testing.T) { require.Contains(t, job.Settings.Tasks[0].SparkPythonTask.PythonFile, "hello_world.py") } -func TestAccAbortBind(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) - +func TestAbortBind(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - gt := &generateJobTest{T: t, w: wt.W} + gt := &generateJobTest{T: wt, w: wt.W} - nodeTypeId := internal.GetNodeTypeId(env) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, "spark_version": "13.3.x-scala2.12", "node_type_id": nodeTypeId, }) - require.NoError(t, err) jobId := gt.createTestJob(ctx) t.Cleanup(func() { @@ -103,17 +94,16 @@ func TestAccAbortBind(t *testing.T) { }) // Bind should fail because prompting is not possible. 
- t.Setenv("BUNDLE_ROOT", bundleRoot) - t.Setenv("TERM", "dumb") - c := internal.NewCobraTestRunner(t, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId)) + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + ctx = env.Set(ctx, "TERM", "dumb") + c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId)) // Expect error suggesting to use --auto-approve - _, _, err = c.Run() + _, _, err := c.Run() assert.ErrorContains(t, err, "failed to bind the resource") assert.ErrorContains(t, err, "This bind operation requires user confirmation, but the current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed") - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) w, err := databricks.NewWorkspaceClient() require.NoError(t, err) @@ -128,18 +118,14 @@ func TestAccAbortBind(t *testing.T) { require.Contains(t, job.Settings.Tasks[0].NotebookTask.NotebookPath, "test") } -func TestAccGenerateAndBind(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) - +func TestGenerateAndBind(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - gt := &generateJobTest{T: t, w: wt.W} + gt := &generateJobTest{T: wt, w: wt.W} uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "with_includes", map[string]any{ "unique_id": uniqueId, }) - require.NoError(t, err) w, err := databricks.NewWorkspaceClient() require.NoError(t, err) @@ -154,8 +140,8 @@ func TestAccGenerateAndBind(t *testing.T) { } }) - t.Setenv("BUNDLE_ROOT", bundleRoot) - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "generate", "job", + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + c := testcli.NewRunner(t, ctx, "bundle", "generate", "job", "--key", "test_job_key", "--existing-job-id", fmt.Sprint(jobId), "--config-dir", filepath.Join(bundleRoot, "resources"), @@ -171,15 +157,13 @@ func TestAccGenerateAndBind(t *testing.T) { require.Len(t, matches, 1) - c = internal.NewCobraTestRunner(t, "bundle", "deployment", "bind", "test_job_key", fmt.Sprint(jobId), "--auto-approve") + c = testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "test_job_key", fmt.Sprint(jobId), "--auto-approve") _, _, err = c.Run() require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) - err = destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) // Check that job is bound and does not extsts after bundle is destroyed _, err = w.Jobs.Get(ctx, jobs.GetJobRequest{ diff --git a/internal/bundle/bundles/artifact_path_with_volume/databricks_template_schema.json b/integration/bundle/bundles/artifact_path_with_volume/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/artifact_path_with_volume/databricks_template_schema.json rename to integration/bundle/bundles/artifact_path_with_volume/databricks_template_schema.json diff --git a/internal/bundle/bundles/artifact_path_with_volume/template/databricks.yml.tmpl b/integration/bundle/bundles/artifact_path_with_volume/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/artifact_path_with_volume/template/databricks.yml.tmpl rename to integration/bundle/bundles/artifact_path_with_volume/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/basic/databricks_template_schema.json 
b/integration/bundle/bundles/basic/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/basic/databricks_template_schema.json rename to integration/bundle/bundles/basic/databricks_template_schema.json diff --git a/internal/bundle/bundles/basic/template/databricks.yml.tmpl b/integration/bundle/bundles/basic/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/basic/template/databricks.yml.tmpl rename to integration/bundle/bundles/basic/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/basic/template/hello_world.py b/integration/bundle/bundles/basic/template/hello_world.py similarity index 100% rename from internal/bundle/bundles/basic/template/hello_world.py rename to integration/bundle/bundles/basic/template/hello_world.py diff --git a/internal/bundle/bundles/clusters/databricks_template_schema.json b/integration/bundle/bundles/clusters/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/clusters/databricks_template_schema.json rename to integration/bundle/bundles/clusters/databricks_template_schema.json diff --git a/internal/bundle/bundles/clusters/template/databricks.yml.tmpl b/integration/bundle/bundles/clusters/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/clusters/template/databricks.yml.tmpl rename to integration/bundle/bundles/clusters/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/clusters/template/hello_world.py b/integration/bundle/bundles/clusters/template/hello_world.py similarity index 100% rename from internal/bundle/bundles/clusters/template/hello_world.py rename to integration/bundle/bundles/clusters/template/hello_world.py diff --git a/internal/bundle/bundles/dashboards/databricks_template_schema.json b/integration/bundle/bundles/dashboards/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/dashboards/databricks_template_schema.json rename to integration/bundle/bundles/dashboards/databricks_template_schema.json diff --git a/internal/bundle/bundles/dashboards/template/dashboard.lvdash.json b/integration/bundle/bundles/dashboards/template/dashboard.lvdash.json similarity index 100% rename from internal/bundle/bundles/dashboards/template/dashboard.lvdash.json rename to integration/bundle/bundles/dashboards/template/dashboard.lvdash.json diff --git a/internal/bundle/bundles/dashboards/template/databricks.yml.tmpl b/integration/bundle/bundles/dashboards/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/dashboards/template/databricks.yml.tmpl rename to integration/bundle/bundles/dashboards/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json b/integration/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json rename to integration/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json diff --git a/internal/bundle/bundles/deploy_then_remove_resources/template/bar.py b/integration/bundle/bundles/deploy_then_remove_resources/template/bar.py similarity index 100% rename from internal/bundle/bundles/deploy_then_remove_resources/template/bar.py rename to integration/bundle/bundles/deploy_then_remove_resources/template/bar.py diff --git 
a/internal/bundle/bundles/deploy_then_remove_resources/template/databricks.yml.tmpl b/integration/bundle/bundles/deploy_then_remove_resources/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/deploy_then_remove_resources/template/databricks.yml.tmpl rename to integration/bundle/bundles/deploy_then_remove_resources/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/deploy_then_remove_resources/template/foo.py b/integration/bundle/bundles/deploy_then_remove_resources/template/foo.py similarity index 100% rename from internal/bundle/bundles/deploy_then_remove_resources/template/foo.py rename to integration/bundle/bundles/deploy_then_remove_resources/template/foo.py diff --git a/internal/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl b/integration/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl similarity index 100% rename from internal/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl rename to integration/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl diff --git a/internal/bundle/bundles/job_metadata/databricks_template_schema.json b/integration/bundle/bundles/job_metadata/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/job_metadata/databricks_template_schema.json rename to integration/bundle/bundles/job_metadata/databricks_template_schema.json diff --git a/internal/bundle/bundles/job_metadata/template/a/b/bar.py b/integration/bundle/bundles/job_metadata/template/a/b/bar.py similarity index 100% rename from internal/bundle/bundles/job_metadata/template/a/b/bar.py rename to integration/bundle/bundles/job_metadata/template/a/b/bar.py diff --git a/internal/bundle/bundles/job_metadata/template/a/b/resources.yml.tmpl b/integration/bundle/bundles/job_metadata/template/a/b/resources.yml.tmpl similarity index 100% rename from internal/bundle/bundles/job_metadata/template/a/b/resources.yml.tmpl rename to integration/bundle/bundles/job_metadata/template/a/b/resources.yml.tmpl diff --git a/internal/bundle/bundles/job_metadata/template/databricks.yml.tmpl b/integration/bundle/bundles/job_metadata/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/job_metadata/template/databricks.yml.tmpl rename to integration/bundle/bundles/job_metadata/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/job_metadata/template/foo.py b/integration/bundle/bundles/job_metadata/template/foo.py similarity index 100% rename from internal/bundle/bundles/job_metadata/template/foo.py rename to integration/bundle/bundles/job_metadata/template/foo.py diff --git a/internal/bundle/bundles/python_wheel_task/databricks_template_schema.json b/integration/bundle/bundles/python_wheel_task/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/python_wheel_task/databricks_template_schema.json rename to integration/bundle/bundles/python_wheel_task/databricks_template_schema.json diff --git a/internal/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl b/integration/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl rename to integration/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/python_wheel_task/template/setup.py.tmpl b/integration/bundle/bundles/python_wheel_task/template/setup.py.tmpl 
similarity index 100% rename from internal/bundle/bundles/python_wheel_task/template/setup.py.tmpl rename to integration/bundle/bundles/python_wheel_task/template/setup.py.tmpl diff --git a/internal/bundle/bundles/python_wheel_task/template/{{.project_name}}/__init__.py b/integration/bundle/bundles/python_wheel_task/template/{{.project_name}}/__init__.py similarity index 100% rename from internal/bundle/bundles/python_wheel_task/template/{{.project_name}}/__init__.py rename to integration/bundle/bundles/python_wheel_task/template/{{.project_name}}/__init__.py diff --git a/internal/bundle/bundles/python_wheel_task/template/{{.project_name}}/__main__.py b/integration/bundle/bundles/python_wheel_task/template/{{.project_name}}/__main__.py similarity index 100% rename from internal/bundle/bundles/python_wheel_task/template/{{.project_name}}/__main__.py rename to integration/bundle/bundles/python_wheel_task/template/{{.project_name}}/__main__.py diff --git a/internal/bundle/bundles/python_wheel_task_with_cluster/databricks_template_schema.json b/integration/bundle/bundles/python_wheel_task_with_cluster/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_cluster/databricks_template_schema.json rename to integration/bundle/bundles/python_wheel_task_with_cluster/databricks_template_schema.json diff --git a/internal/bundle/bundles/python_wheel_task_with_cluster/template/databricks.yml.tmpl b/integration/bundle/bundles/python_wheel_task_with_cluster/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_cluster/template/databricks.yml.tmpl rename to integration/bundle/bundles/python_wheel_task_with_cluster/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/python_wheel_task_with_cluster/template/setup.py.tmpl b/integration/bundle/bundles/python_wheel_task_with_cluster/template/setup.py.tmpl similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_cluster/template/setup.py.tmpl rename to integration/bundle/bundles/python_wheel_task_with_cluster/template/setup.py.tmpl diff --git a/internal/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__init__.py b/integration/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__init__.py similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__init__.py rename to integration/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__init__.py diff --git a/internal/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__main__.py b/integration/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__main__.py similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__main__.py rename to integration/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__main__.py diff --git a/internal/bundle/bundles/python_wheel_task_with_environments/databricks_template_schema.json b/integration/bundle/bundles/python_wheel_task_with_environments/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_environments/databricks_template_schema.json rename to integration/bundle/bundles/python_wheel_task_with_environments/databricks_template_schema.json diff --git 
a/internal/bundle/bundles/python_wheel_task_with_environments/template/databricks.yml.tmpl b/integration/bundle/bundles/python_wheel_task_with_environments/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_environments/template/databricks.yml.tmpl rename to integration/bundle/bundles/python_wheel_task_with_environments/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/python_wheel_task_with_environments/template/setup.py.tmpl b/integration/bundle/bundles/python_wheel_task_with_environments/template/setup.py.tmpl similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_environments/template/setup.py.tmpl rename to integration/bundle/bundles/python_wheel_task_with_environments/template/setup.py.tmpl diff --git a/internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__init__.py b/integration/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__init__.py similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__init__.py rename to integration/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__init__.py diff --git a/internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__main__.py b/integration/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__main__.py similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__main__.py rename to integration/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__main__.py diff --git a/internal/bundle/bundles/recreate_pipeline/databricks_template_schema.json b/integration/bundle/bundles/recreate_pipeline/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/recreate_pipeline/databricks_template_schema.json rename to integration/bundle/bundles/recreate_pipeline/databricks_template_schema.json diff --git a/internal/bundle/bundles/recreate_pipeline/template/databricks.yml.tmpl b/integration/bundle/bundles/recreate_pipeline/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/recreate_pipeline/template/databricks.yml.tmpl rename to integration/bundle/bundles/recreate_pipeline/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/recreate_pipeline/template/nb.sql b/integration/bundle/bundles/recreate_pipeline/template/nb.sql similarity index 100% rename from internal/bundle/bundles/recreate_pipeline/template/nb.sql rename to integration/bundle/bundles/recreate_pipeline/template/nb.sql diff --git a/internal/bundle/bundles/spark_jar_task/databricks_template_schema.json b/integration/bundle/bundles/spark_jar_task/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/spark_jar_task/databricks_template_schema.json rename to integration/bundle/bundles/spark_jar_task/databricks_template_schema.json diff --git a/internal/bundle/bundles/spark_jar_task/template/databricks.yml.tmpl b/integration/bundle/bundles/spark_jar_task/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/spark_jar_task/template/databricks.yml.tmpl rename to integration/bundle/bundles/spark_jar_task/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/META-INF/MANIFEST.MF 
b/integration/bundle/bundles/spark_jar_task/template/{{.project_name}}/META-INF/MANIFEST.MF similarity index 100% rename from internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/META-INF/MANIFEST.MF rename to integration/bundle/bundles/spark_jar_task/template/{{.project_name}}/META-INF/MANIFEST.MF diff --git a/internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/PrintArgs.java b/integration/bundle/bundles/spark_jar_task/template/{{.project_name}}/PrintArgs.java similarity index 100% rename from internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/PrintArgs.java rename to integration/bundle/bundles/spark_jar_task/template/{{.project_name}}/PrintArgs.java diff --git a/internal/bundle/bundles/uc_schema/databricks_template_schema.json b/integration/bundle/bundles/uc_schema/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/uc_schema/databricks_template_schema.json rename to integration/bundle/bundles/uc_schema/databricks_template_schema.json diff --git a/internal/bundle/bundles/uc_schema/template/databricks.yml.tmpl b/integration/bundle/bundles/uc_schema/template/databricks.yml.tmpl similarity index 93% rename from internal/bundle/bundles/uc_schema/template/databricks.yml.tmpl rename to integration/bundle/bundles/uc_schema/template/databricks.yml.tmpl index 15076ac85..0cb8d4f61 100644 --- a/internal/bundle/bundles/uc_schema/template/databricks.yml.tmpl +++ b/integration/bundle/bundles/uc_schema/template/databricks.yml.tmpl @@ -12,7 +12,6 @@ resources: - notebook: path: ./nb.sql development: true - catalog: main include: - "*.yml" diff --git a/internal/bundle/bundles/uc_schema/template/nb.sql b/integration/bundle/bundles/uc_schema/template/nb.sql similarity index 100% rename from internal/bundle/bundles/uc_schema/template/nb.sql rename to integration/bundle/bundles/uc_schema/template/nb.sql diff --git a/internal/bundle/bundles/uc_schema/template/schema.yml.tmpl b/integration/bundle/bundles/uc_schema/template/schema.yml.tmpl similarity index 91% rename from internal/bundle/bundles/uc_schema/template/schema.yml.tmpl rename to integration/bundle/bundles/uc_schema/template/schema.yml.tmpl index 50067036e..0fcf10453 100644 --- a/internal/bundle/bundles/uc_schema/template/schema.yml.tmpl +++ b/integration/bundle/bundles/uc_schema/template/schema.yml.tmpl @@ -11,3 +11,4 @@ targets: pipelines: foo: target: ${resources.schemas.bar.id} + catalog: main diff --git a/internal/bundle/bundles/volume/databricks_template_schema.json b/integration/bundle/bundles/volume/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/volume/databricks_template_schema.json rename to integration/bundle/bundles/volume/databricks_template_schema.json diff --git a/internal/bundle/bundles/volume/template/databricks.yml.tmpl b/integration/bundle/bundles/volume/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/volume/template/databricks.yml.tmpl rename to integration/bundle/bundles/volume/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/volume/template/nb.sql b/integration/bundle/bundles/volume/template/nb.sql similarity index 100% rename from internal/bundle/bundles/volume/template/nb.sql rename to integration/bundle/bundles/volume/template/nb.sql diff --git a/internal/bundle/bundles/with_includes/databricks_template_schema.json b/integration/bundle/bundles/with_includes/databricks_template_schema.json similarity index 100% rename from 
internal/bundle/bundles/with_includes/databricks_template_schema.json rename to integration/bundle/bundles/with_includes/databricks_template_schema.json diff --git a/internal/bundle/bundles/with_includes/template/databricks.yml.tmpl b/integration/bundle/bundles/with_includes/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/with_includes/template/databricks.yml.tmpl rename to integration/bundle/bundles/with_includes/template/databricks.yml.tmpl diff --git a/internal/bundle/clusters_test.go b/integration/bundle/clusters_test.go similarity index 68% rename from internal/bundle/clusters_test.go rename to integration/bundle/clusters_test.go index a961f3ea8..449206208 100644 --- a/internal/bundle/clusters_test.go +++ b/integration/bundle/clusters_test.go @@ -1,37 +1,33 @@ -package bundle +package bundle_test import ( "fmt" "testing" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" - "github.com/databricks/cli/libs/env" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/google/uuid" "github.com/stretchr/testify/require" ) -func TestAccDeployBundleWithCluster(t *testing.T) { - ctx, wt := acc.WorkspaceTest(t) - - if testutil.IsAWSCloud(wt.T) { +func TestDeployBundleWithCluster(t *testing.T) { + if testutil.GetCloud(t) == testutil.AWS { t.Skip("Skipping test for AWS cloud because it is not permitted to create clusters") } - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + ctx, wt := acc.WorkspaceTest(t) + + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - root, err := initTestTemplate(t, ctx, "clusters", map[string]any{ + root := initTestTemplate(t, ctx, "clusters", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, }) - require.NoError(t, err) t.Cleanup(func() { - err = destroyBundle(t, ctx, root) - require.NoError(t, err) + destroyBundle(t, ctx, root) cluster, err := wt.W.Clusters.GetByClusterName(ctx, fmt.Sprintf("test-cluster-%s", uniqueId)) if err != nil { @@ -39,17 +35,20 @@ func TestAccDeployBundleWithCluster(t *testing.T) { } else { require.Contains(t, []compute.State{compute.StateTerminated, compute.StateTerminating}, cluster.State) } - }) - err = deployBundle(t, ctx, root) - require.NoError(t, err) + deployBundle(t, ctx, root) // Cluster should exists after bundle deployment cluster, err := wt.W.Clusters.GetByClusterName(ctx, fmt.Sprintf("test-cluster-%s", uniqueId)) require.NoError(t, err) require.NotNil(t, cluster) + if testing.Short() { + t.Log("Skip the job run in short mode") + return + } + out, err := runResource(t, ctx, root, "foo") require.NoError(t, err) require.Contains(t, out, "Hello World!") diff --git a/internal/bundle/dashboards_test.go b/integration/bundle/dashboards_test.go similarity index 77% rename from internal/bundle/dashboards_test.go rename to integration/bundle/dashboards_test.go index 3c2e27c62..83b4b8b03 100644 --- a/internal/bundle/dashboards_test.go +++ b/integration/bundle/dashboards_test.go @@ -1,10 +1,11 @@ -package bundle +package bundle_test import ( "fmt" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/service/dashboards" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/google/uuid" @@ -12,24 
+13,21 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccDashboards(t *testing.T) { +func TestDashboards(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - warehouseID := acc.GetEnvOrSkipTest(t, "TEST_DEFAULT_WAREHOUSE_ID") + warehouseID := testutil.GetEnvOrSkipTest(t, "TEST_DEFAULT_WAREHOUSE_ID") uniqueID := uuid.New().String() - root, err := initTestTemplate(t, ctx, "dashboards", map[string]any{ + root := initTestTemplate(t, ctx, "dashboards", map[string]any{ "unique_id": uniqueID, "warehouse_id": warehouseID, }) - require.NoError(t, err) t.Cleanup(func() { - err = destroyBundle(t, ctx, root) - require.NoError(t, err) + destroyBundle(t, ctx, root) }) - err = deployBundle(t, ctx, root) - require.NoError(t, err) + deployBundle(t, ctx, root) // Load bundle configuration by running the validate command. b := unmarshalConfig(t, mustValidateBundle(t, ctx, root)) @@ -54,12 +52,11 @@ func TestAccDashboards(t *testing.T) { require.NoError(t, err) // Try to redeploy the bundle and confirm that the out of band modification is detected. - stdout, _, err := deployBundleWithArgs(t, ctx, root) + stdout, _, err := deployBundleWithArgsErr(t, ctx, root) require.Error(t, err) assert.Contains(t, stdout, `Error: dashboard "file_reference" has been modified remotely`+"\n") // Redeploy the bundle with the --force flag and confirm that the out of band modification is ignored. - _, stderr, err := deployBundleWithArgs(t, ctx, root, "--force") - require.NoError(t, err) + _, stderr := deployBundleWithArgs(t, ctx, root, "--force") assert.Contains(t, stderr, `Deployment complete!`+"\n") } diff --git a/internal/bundle/deploy_test.go b/integration/bundle/deploy_test.go similarity index 79% rename from internal/bundle/deploy_test.go rename to integration/bundle/deploy_test.go index 759e85de5..0b37e5630 100644 --- a/internal/bundle/deploy_test.go +++ b/integration/bundle/deploy_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "context" @@ -11,8 +11,9 @@ import ( "testing" "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/env" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" @@ -24,13 +25,11 @@ import ( ) func setupUcSchemaBundle(t *testing.T, ctx context.Context, w *databricks.WorkspaceClient, uniqueId string) string { - bundleRoot, err := initTestTemplate(t, ctx, "uc_schema", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "uc_schema", map[string]any{ "unique_id": uniqueId, }) - require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) t.Cleanup(func() { destroyBundle(t, ctx, bundleRoot) @@ -80,7 +79,7 @@ func setupUcSchemaBundle(t *testing.T, ctx context.Context, w *databricks.Worksp return bundleRoot } -func TestAccBundleDeployUcSchema(t *testing.T) { +func TestBundleDeployUcSchema(t *testing.T) { ctx, wt := acc.UcWorkspaceTest(t) w := wt.W @@ -95,8 +94,7 @@ func TestAccBundleDeployUcSchema(t *testing.T) { require.NoError(t, err) // Redeploy the bundle - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) // Assert the schema is deleted _, err = w.Schemas.GetByFullName(ctx, strings.Join([]string{catalogName, schemaName}, ".")) @@ -105,7 +103,7 @@ func 
TestAccBundleDeployUcSchema(t *testing.T) { assert.Equal(t, "SCHEMA_DOES_NOT_EXIST", apiErr.ErrorCode) } -func TestAccBundleDeployUcSchemaFailsWithoutAutoApprove(t *testing.T) { +func TestBundleDeployUcSchemaFailsWithoutAutoApprove(t *testing.T) { ctx, wt := acc.UcWorkspaceTest(t) w := wt.W @@ -117,9 +115,9 @@ func TestAccBundleDeployUcSchemaFailsWithoutAutoApprove(t *testing.T) { require.NoError(t, err) // Redeploy the bundle - t.Setenv("BUNDLE_ROOT", bundleRoot) - t.Setenv("TERM", "dumb") - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock") + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + ctx = env.Set(ctx, "TERM", "dumb") + c := testcli.NewRunner(t, ctx, "bundle", "deploy", "--force-lock") stdout, stderr, err := c.Run() assert.EqualError(t, err, root.ErrAlreadyPrinted.Error()) @@ -127,22 +125,20 @@ func TestAccBundleDeployUcSchemaFailsWithoutAutoApprove(t *testing.T) { assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed") } -func TestAccBundlePipelineDeleteWithoutAutoApprove(t *testing.T) { +func TestBundlePipelineDeleteWithoutAutoApprove(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, }) - require.NoError(t, err) // deploy pipeline - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) // assert pipeline is created pipelineName := "test-bundle-pipeline-" + uniqueId @@ -161,9 +157,9 @@ func TestAccBundlePipelineDeleteWithoutAutoApprove(t *testing.T) { require.NoError(t, err) // Redeploy the bundle. Expect it to fail because deleting the pipeline requires --auto-approve. - t.Setenv("BUNDLE_ROOT", bundleRoot) - t.Setenv("TERM", "dumb") - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock") + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + ctx = env.Set(ctx, "TERM", "dumb") + c := testcli.NewRunner(t, ctx, "bundle", "deploy", "--force-lock") stdout, stderr, err := c.Run() assert.EqualError(t, err, root.ErrAlreadyPrinted.Error()) @@ -173,21 +169,18 @@ restore the defined STs and MVs through full refresh. Note that recreation is ne properties such as the 'catalog' or 'storage' are changed: delete pipeline bar`) assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. 
Please specify --auto-approve if you would like to skip prompts and proceed") - } -func TestAccBundlePipelineRecreateWithoutAutoApprove(t *testing.T) { +func TestBundlePipelineRecreateWithoutAutoApprove(t *testing.T) { ctx, wt := acc.UcWorkspaceTest(t) w := wt.W uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "recreate_pipeline", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "recreate_pipeline", map[string]any{ "unique_id": uniqueId, }) - require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) t.Cleanup(func() { destroyBundle(t, ctx, bundleRoot) @@ -200,9 +193,9 @@ func TestAccBundlePipelineRecreateWithoutAutoApprove(t *testing.T) { require.Equal(t, pipelineName, pipeline.Name) // Redeploy the bundle, pointing the DLT pipeline to a different UC catalog. - t.Setenv("BUNDLE_ROOT", bundleRoot) - t.Setenv("TERM", "dumb") - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock", "--var=\"catalog=whatever\"") + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + ctx = env.Set(ctx, "TERM", "dumb") + c := testcli.NewRunner(t, ctx, "bundle", "deploy", "--force-lock", "--var=\"catalog=whatever\"") stdout, stderr, err := c.Run() assert.EqualError(t, err, root.ErrAlreadyPrinted.Error()) @@ -214,27 +207,25 @@ properties such as the 'catalog' or 'storage' are changed: assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed") } -func TestAccDeployBasicBundleLogs(t *testing.T) { +func TestDeployBasicBundleLogs(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - root, err := initTestTemplate(t, ctx, "basic", map[string]any{ + root := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, }) - require.NoError(t, err) t.Cleanup(func() { - err = destroyBundle(t, ctx, root) - require.NoError(t, err) + destroyBundle(t, ctx, root) }) currentUser, err := wt.W.CurrentUser.Me(ctx) require.NoError(t, err) - stdout, stderr := blackBoxRun(t, root, "bundle", "deploy") + stdout, stderr := blackBoxRun(t, ctx, root, "bundle", "deploy") assert.Equal(t, strings.Join([]string{ fmt.Sprintf("Uploading bundle files to /Workspace/Users/%s/.bundle/%s/files...", currentUser.UserName, uniqueId), "Deploying resources...", @@ -244,18 +235,16 @@ func TestAccDeployBasicBundleLogs(t *testing.T) { assert.Equal(t, "", stdout) } -func TestAccDeployUcVolume(t *testing.T) { +func TestDeployUcVolume(t *testing.T) { ctx, wt := acc.UcWorkspaceTest(t) w := wt.W uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "volume", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "volume", map[string]any{ "unique_id": uniqueId, }) - require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) t.Cleanup(func() { destroyBundle(t, ctx, bundleRoot) @@ -280,9 +269,9 @@ func TestAccDeployUcVolume(t *testing.T) { assert.Equal(t, []catalog.Privilege{catalog.PrivilegeWriteVolume}, grants.PrivilegeAssignments[0].Privileges) // Recreation of the volume without --auto-approve should fail since prompting is not possible - t.Setenv("TERM", "dumb") - 
t.Setenv("BUNDLE_ROOT", bundleRoot) - stdout, stderr, err := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--var=schema_name=${resources.schemas.schema2.name}").Run() + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + ctx = env.Set(ctx, "TERM", "dumb") + stdout, stderr, err := testcli.NewRunner(t, ctx, "bundle", "deploy", "--var=schema_name=${resources.schemas.schema2.name}").Run() assert.Error(t, err) assert.Contains(t, stderr.String(), `This action will result in the deletion or recreation of the following volumes. For managed volumes, the files stored in the volume are also deleted from your @@ -292,9 +281,9 @@ is removed from the catalog, but the underlying files are not deleted: assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed") // Successfully recreate the volume with --auto-approve - t.Setenv("TERM", "dumb") - t.Setenv("BUNDLE_ROOT", bundleRoot) - _, _, err = internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--var=schema_name=${resources.schemas.schema2.name}", "--auto-approve").Run() + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + ctx = env.Set(ctx, "TERM", "dumb") + _, _, err = testcli.NewRunner(t, ctx, "bundle", "deploy", "--var=schema_name=${resources.schemas.schema2.name}", "--auto-approve").Run() assert.NoError(t, err) // Assert the volume is updated successfully diff --git a/internal/bundle/deploy_then_remove_resources_test.go b/integration/bundle/deploy_then_remove_resources_test.go similarity index 67% rename from internal/bundle/deploy_then_remove_resources_test.go rename to integration/bundle/deploy_then_remove_resources_test.go index 66ec5c16a..052d84dd6 100644 --- a/internal/bundle/deploy_then_remove_resources_test.go +++ b/integration/bundle/deploy_then_remove_resources_test.go @@ -1,34 +1,31 @@ -package bundle +package bundle_test import ( "os" "path/filepath" "testing" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" - "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestAccBundleDeployThenRemoveResources(t *testing.T) { +func TestBundleDeployThenRemoveResources(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, }) - require.NoError(t, err) // deploy pipeline - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) // assert pipeline is created pipelineName := "test-bundle-pipeline-" + uniqueId @@ -47,8 +44,7 @@ func TestAccBundleDeployThenRemoveResources(t *testing.T) { require.NoError(t, err) // deploy again - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) // assert pipeline is deleted _, err = w.Pipelines.GetByName(ctx, pipelineName) @@ -59,7 +55,6 @@ func TestAccBundleDeployThenRemoveResources(t *testing.T) { 
assert.ErrorContains(t, err, "does not exist") t.Cleanup(func() { - err = destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) }) } diff --git a/integration/bundle/deploy_to_shared_test.go b/integration/bundle/deploy_to_shared_test.go new file mode 100644 index 000000000..b4395f4c6 --- /dev/null +++ b/integration/bundle/deploy_to_shared_test.go @@ -0,0 +1,34 @@ +package bundle_test + +import ( + "fmt" + "testing" + + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" + "github.com/google/uuid" + "github.com/stretchr/testify/require" +) + +func TestDeployBasicToSharedWorkspacePath(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + + nodeTypeId := testutil.GetCloud(t).NodeTypeID() + uniqueId := uuid.New().String() + + currentUser, err := wt.W.CurrentUser.Me(ctx) + require.NoError(t, err) + + bundleRoot := initTestTemplate(t, ctx, "basic", map[string]any{ + "unique_id": uniqueId, + "node_type_id": nodeTypeId, + "spark_version": defaultSparkVersion, + "root_path": fmt.Sprintf("/Shared/%s", currentUser.UserName), + }) + + t.Cleanup(func() { + destroyBundle(wt, ctx, bundleRoot) + }) + + deployBundle(wt, ctx, bundleRoot) +} diff --git a/internal/bundle/deployment_state_test.go b/integration/bundle/deployment_state_test.go similarity index 77% rename from internal/bundle/deployment_state_test.go rename to integration/bundle/deployment_state_test.go index 25f36d4a2..fff1504d2 100644 --- a/internal/bundle/deployment_state_test.go +++ b/integration/bundle/deployment_state_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "os" @@ -7,43 +7,39 @@ import ( "testing" "github.com/databricks/cli/bundle/deploy" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/env" "github.com/google/uuid" "github.com/stretchr/testify/require" ) -func TestAccFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) - +func TestFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W - nodeTypeId := internal.GetNodeTypeId(env) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, "spark_version": "13.3.x-scala2.12", "node_type_id": nodeTypeId, }) - require.NoError(t, err) - t.Setenv("BUNDLE_ROOT", bundleRoot) + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) // Add some test file to the bundle - err = os.WriteFile(filepath.Join(bundleRoot, "test.py"), []byte("print('Hello, World!')"), 0644) + err := os.WriteFile(filepath.Join(bundleRoot, "test.py"), []byte("print('Hello, World!')"), 0o644) require.NoError(t, err) - err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), []byte("print('Hello, World!')"), 0644) + err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), []byte("print('Hello, World!')"), 0o644) require.NoError(t, err) // Add notebook to the bundle - err = os.WriteFile(filepath.Join(bundleRoot, "notebook.py"), []byte("# Databricks notebook source\nHello, World!"), 0644) + err = os.WriteFile(filepath.Join(bundleRoot, "notebook.py"), []byte("# Databricks notebook source\nHello, World!"), 0o644) require.NoError(t, err) - err = 
deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) t.Cleanup(func() { destroyBundle(t, ctx, bundleRoot) @@ -79,11 +75,10 @@ func TestAccFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) { require.NoError(t, err) // Modify the content of another file - err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), []byte("print('Modified!')"), 0644) + err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), []byte("print('Modified!')"), 0o644) require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) // Check that removed file is not in workspace anymore _, err = w.Workspace.GetStatusByPath(ctx, path.Join(remoteRoot, "files", "test.py")) diff --git a/internal/bundle/destroy_test.go b/integration/bundle/destroy_test.go similarity index 78% rename from internal/bundle/destroy_test.go rename to integration/bundle/destroy_test.go index baccf4e6f..f18138ce5 100644 --- a/internal/bundle/destroy_test.go +++ b/integration/bundle/destroy_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "errors" @@ -6,37 +6,34 @@ import ( "path/filepath" "testing" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" - "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/apierr" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestAccBundleDestroy(t *testing.T) { +func TestBundleDestroy(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, }) - require.NoError(t, err) snapshotsDir := filepath.Join(bundleRoot, ".databricks", "bundle", "default", "sync-snapshots") // Assert the snapshot file does not exist - _, err = os.ReadDir(snapshotsDir) + _, err := os.ReadDir(snapshotsDir) assert.ErrorIs(t, err, os.ErrNotExist) // deploy resources - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) // Assert the snapshot file exists entries, err := os.ReadDir(snapshotsDir) @@ -61,8 +58,7 @@ func TestAccBundleDestroy(t *testing.T) { assert.Equal(t, job.Settings.Name, jobName) // destroy bundle - err = destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) // assert pipeline is deleted _, err = w.Pipelines.GetByName(ctx, pipelineName) diff --git a/internal/bundle/empty_bundle_test.go b/integration/bundle/empty_bundle_test.go similarity index 67% rename from internal/bundle/empty_bundle_test.go rename to integration/bundle/empty_bundle_test.go index 36883ae00..1ab240d13 100644 --- a/internal/bundle/empty_bundle_test.go +++ b/integration/bundle/empty_bundle_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "fmt" @@ -6,12 +6,12 @@ import ( "path/filepath" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/google/uuid" "github.com/stretchr/testify/require" ) -func 
TestAccEmptyBundleDeploy(t *testing.T) { +func TestEmptyBundleDeploy(t *testing.T) { ctx, _ := acc.WorkspaceTest(t) // create empty bundle @@ -26,11 +26,9 @@ func TestAccEmptyBundleDeploy(t *testing.T) { f.Close() // deploy empty bundle - err = deployBundle(t, ctx, tmpDir) - require.NoError(t, err) + deployBundle(t, ctx, tmpDir) t.Cleanup(func() { - err = destroyBundle(t, ctx, tmpDir) - require.NoError(t, err) + destroyBundle(t, ctx, tmpDir) }) } diff --git a/internal/bundle/environments_test.go b/integration/bundle/environments_test.go similarity index 71% rename from internal/bundle/environments_test.go rename to integration/bundle/environments_test.go index 5cffe8857..e0dc91532 100644 --- a/internal/bundle/environments_test.go +++ b/integration/bundle/environments_test.go @@ -1,25 +1,23 @@ -package bundle +package bundle_test import ( "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/google/uuid" "github.com/stretchr/testify/require" ) -func TestAccPythonWheelTaskWithEnvironmentsDeployAndRun(t *testing.T) { +func TestPythonWheelTaskWithEnvironmentsDeployAndRun(t *testing.T) { t.Skip("Skipping test until serveless is enabled") ctx, _ := acc.WorkspaceTest(t) - bundleRoot, err := initTestTemplate(t, ctx, "python_wheel_task_with_environments", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "python_wheel_task_with_environments", map[string]any{ "unique_id": uuid.New().String(), }) - require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) t.Cleanup(func() { destroyBundle(t, ctx, bundleRoot) diff --git a/internal/bundle/generate_job_test.go b/integration/bundle/generate_job_test.go similarity index 74% rename from internal/bundle/generate_job_test.go rename to integration/bundle/generate_job_test.go index 847a7a14e..b68bb7d61 100644 --- a/internal/bundle/generate_job_test.go +++ b/integration/bundle/generate_job_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "context" @@ -9,9 +9,10 @@ import ( "strings" "testing" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/compute" @@ -20,27 +21,26 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccGenerateFromExistingJobAndDeploy(t *testing.T) { +func TestGenerateFromExistingJobAndDeploy(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - gt := &generateJobTest{T: t, w: wt.W} + gt := &generateJobTest{T: wt, w: wt.W} uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "with_includes", map[string]any{ "unique_id": uniqueId, }) - require.NoError(t, err) jobId := gt.createTestJob(ctx) t.Cleanup(func() { gt.destroyJob(ctx, jobId) }) - t.Setenv("BUNDLE_ROOT", bundleRoot) - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "generate", "job", + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + c := testcli.NewRunner(t, ctx, "bundle", "generate", "job", "--existing-job-id", fmt.Sprint(jobId), "--config-dir", filepath.Join(bundleRoot, "resources"), "--source-dir", filepath.Join(bundleRoot, "src")) - _, _, err = c.Run() + _, _, err 
:= c.Run() require.NoError(t, err) _, err = os.Stat(filepath.Join(bundleRoot, "src", "test.py")) @@ -61,15 +61,13 @@ func TestAccGenerateFromExistingJobAndDeploy(t *testing.T) { require.Contains(t, generatedYaml, "spark_version: 13.3.x-scala2.12") require.Contains(t, generatedYaml, "num_workers: 1") - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) - err = destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) } type generateJobTest struct { - T *testing.T + T *acc.WorkspaceT w *databricks.WorkspaceClient } @@ -77,17 +75,7 @@ func (gt *generateJobTest) createTestJob(ctx context.Context) int64 { t := gt.T w := gt.w - var nodeTypeId string - switch testutil.GetCloud(t) { - case testutil.AWS: - nodeTypeId = "i3.xlarge" - case testutil.Azure: - nodeTypeId = "Standard_DS4_v2" - case testutil.GCP: - nodeTypeId = "n1-standard-4" - } - - tmpdir := internal.TemporaryWorkspaceDir(t, w) + tmpdir := acc.TemporaryWorkspaceDir(t, "generate-job-") f, err := filer.NewWorkspaceFilesClient(w, tmpdir) require.NoError(t, err) @@ -95,14 +83,14 @@ func (gt *generateJobTest) createTestJob(ctx context.Context) int64 { require.NoError(t, err) resp, err := w.Jobs.Create(ctx, jobs.CreateJob{ - Name: internal.RandomName("generated-job-"), + Name: testutil.RandomName("generated-job-"), Tasks: []jobs.Task{ { TaskKey: "test", NewCluster: &compute.ClusterSpec{ SparkVersion: "13.3.x-scala2.12", NumWorkers: 1, - NodeTypeId: nodeTypeId, + NodeTypeId: testutil.GetCloud(t).NodeTypeID(), SparkConf: map[string]string{ "spark.databricks.enableWsfs": "true", "spark.databricks.hive.metastore.glueCatalog.enabled": "true", diff --git a/internal/bundle/generate_pipeline_test.go b/integration/bundle/generate_pipeline_test.go similarity index 77% rename from internal/bundle/generate_pipeline_test.go rename to integration/bundle/generate_pipeline_test.go index 82467952d..7843ec0c3 100644 --- a/internal/bundle/generate_pipeline_test.go +++ b/integration/bundle/generate_pipeline_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "context" @@ -9,8 +9,10 @@ import ( "strings" "testing" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/pipelines" @@ -18,27 +20,26 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccGenerateFromExistingPipelineAndDeploy(t *testing.T) { +func TestGenerateFromExistingPipelineAndDeploy(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - gt := &generatePipelineTest{T: t, w: wt.W} + gt := &generatePipelineTest{T: wt, w: wt.W} uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "with_includes", map[string]any{ "unique_id": uniqueId, }) - require.NoError(t, err) pipelineId, name := gt.createTestPipeline(ctx) t.Cleanup(func() { gt.destroyPipeline(ctx, pipelineId) }) - t.Setenv("BUNDLE_ROOT", bundleRoot) - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "generate", "pipeline", + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + c := testcli.NewRunner(t, ctx, "bundle", "generate", "pipeline", "--existing-pipeline-id", fmt.Sprint(pipelineId), 
"--config-dir", filepath.Join(bundleRoot, "resources"), "--source-dir", filepath.Join(bundleRoot, "src")) - _, _, err = c.Run() + _, _, err := c.Run() require.NoError(t, err) _, err = os.Stat(filepath.Join(bundleRoot, "src", "notebook.py")) @@ -58,8 +59,8 @@ func TestAccGenerateFromExistingPipelineAndDeploy(t *testing.T) { generatedYaml := string(data) // Replace pipeline name - generatedYaml = strings.ReplaceAll(generatedYaml, name, internal.RandomName("copy-generated-pipeline-")) - err = os.WriteFile(fileName, []byte(generatedYaml), 0644) + generatedYaml = strings.ReplaceAll(generatedYaml, name, testutil.RandomName("copy-generated-pipeline-")) + err = os.WriteFile(fileName, []byte(generatedYaml), 0o644) require.NoError(t, err) require.Contains(t, generatedYaml, "libraries:") @@ -68,15 +69,13 @@ func TestAccGenerateFromExistingPipelineAndDeploy(t *testing.T) { require.Contains(t, generatedYaml, "- file:") require.Contains(t, generatedYaml, fmt.Sprintf("path: %s", filepath.Join("..", "src", "test.py"))) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) - err = destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) } type generatePipelineTest struct { - T *testing.T + T *acc.WorkspaceT w *databricks.WorkspaceClient } @@ -84,7 +83,7 @@ func (gt *generatePipelineTest) createTestPipeline(ctx context.Context) (string, t := gt.T w := gt.w - tmpdir := internal.TemporaryWorkspaceDir(t, w) + tmpdir := acc.TemporaryWorkspaceDir(t, "generate-pipeline-") f, err := filer.NewWorkspaceFilesClient(w, tmpdir) require.NoError(t, err) @@ -94,10 +93,9 @@ func (gt *generatePipelineTest) createTestPipeline(ctx context.Context) (string, err = f.Write(ctx, "test.py", strings.NewReader("print('Hello!')")) require.NoError(t, err) - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - nodeTypeId := internal.GetNodeTypeId(env) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() - name := internal.RandomName("generated-pipeline-") + name := testutil.RandomName("generated-pipeline-") resp, err := w.Pipelines.Create(ctx, pipelines.CreatePipeline{ Name: name, Libraries: []pipelines.PipelineLibrary{ diff --git a/internal/bundle/helpers.go b/integration/bundle/helpers_test.go similarity index 50% rename from internal/bundle/helpers.go rename to integration/bundle/helpers_test.go index dd9c841c9..e884cd8c6 100644 --- a/internal/bundle/helpers.go +++ b/integration/bundle/helpers_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "bytes" @@ -9,133 +9,136 @@ import ( "os/exec" "path/filepath" "strings" - "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/flags" + "github.com/databricks/cli/libs/folders" "github.com/databricks/cli/libs/template" - "github.com/databricks/cli/libs/vfs" "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/require" ) const defaultSparkVersion = "13.3.x-snapshot-scala2.12" -func initTestTemplate(t *testing.T, ctx context.Context, templateName string, config map[string]any) (string, error) { +func initTestTemplate(t testutil.TestingT, ctx context.Context, templateName string, config map[string]any) string { bundleRoot := t.TempDir() return 
initTestTemplateWithBundleRoot(t, ctx, templateName, config, bundleRoot) } -func initTestTemplateWithBundleRoot(t *testing.T, ctx context.Context, templateName string, config map[string]any, bundleRoot string) (string, error) { +func initTestTemplateWithBundleRoot(t testutil.TestingT, ctx context.Context, templateName string, config map[string]any, bundleRoot string) string { templateRoot := filepath.Join("bundles", templateName) - configFilePath, err := writeConfigFile(t, config) - if err != nil { - return "", err - } + configFilePath := writeConfigFile(t, config) ctx = root.SetWorkspaceClient(ctx, nil) - cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "bundles") + cmd := cmdio.NewIO(ctx, flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "bundles") ctx = cmdio.InContext(ctx, cmd) out, err := filer.NewLocalClient(bundleRoot) require.NoError(t, err) err = template.Materialize(ctx, configFilePath, os.DirFS(templateRoot), out) - return bundleRoot, err + require.NoError(t, err) + return bundleRoot } -func writeConfigFile(t *testing.T, config map[string]any) (string, error) { +func writeConfigFile(t testutil.TestingT, config map[string]any) string { bytes, err := json.Marshal(config) - if err != nil { - return "", err - } + require.NoError(t, err) dir := t.TempDir() filepath := filepath.Join(dir, "config.json") t.Log("Configuration for template: ", string(bytes)) - err = os.WriteFile(filepath, bytes, 0644) - return filepath, err + err = os.WriteFile(filepath, bytes, 0o644) + require.NoError(t, err) + return filepath } -func validateBundle(t *testing.T, ctx context.Context, path string) ([]byte, error) { +func validateBundle(t testutil.TestingT, ctx context.Context, path string) ([]byte, error) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "validate", "--output", "json") + c := testcli.NewRunner(t, ctx, "bundle", "validate", "--output", "json") stdout, _, err := c.Run() return stdout.Bytes(), err } -func mustValidateBundle(t *testing.T, ctx context.Context, path string) []byte { +func mustValidateBundle(t testutil.TestingT, ctx context.Context, path string) []byte { data, err := validateBundle(t, ctx, path) require.NoError(t, err) return data } -func unmarshalConfig(t *testing.T, data []byte) *bundle.Bundle { +func unmarshalConfig(t testutil.TestingT, data []byte) *bundle.Bundle { bundle := &bundle.Bundle{} err := json.Unmarshal(data, &bundle.Config) require.NoError(t, err) return bundle } -func deployBundle(t *testing.T, ctx context.Context, path string) error { +func deployBundle(t testutil.TestingT, ctx context.Context, path string) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock", "--auto-approve") + c := testcli.NewRunner(t, ctx, "bundle", "deploy", "--force-lock", "--auto-approve") _, _, err := c.Run() - return err + require.NoError(t, err) } -func deployBundleWithArgs(t *testing.T, ctx context.Context, path string, args ...string) (string, string, error) { +func deployBundleWithArgsErr(t testutil.TestingT, ctx context.Context, path string, args ...string) (string, string, error) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) args = append([]string{"bundle", "deploy"}, args...) - c := internal.NewCobraTestRunnerWithContext(t, ctx, args...) + c := testcli.NewRunner(t, ctx, args...) 
stdout, stderr, err := c.Run() return stdout.String(), stderr.String(), err } -func deployBundleWithFlags(t *testing.T, ctx context.Context, path string, flags []string) error { +func deployBundleWithArgs(t testutil.TestingT, ctx context.Context, path string, args ...string) (string, string) { + stdout, stderr, err := deployBundleWithArgsErr(t, ctx, path, args...) + require.NoError(t, err) + return stdout, stderr +} + +func deployBundleWithFlags(t testutil.TestingT, ctx context.Context, path string, flags []string) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) args := []string{"bundle", "deploy", "--force-lock"} args = append(args, flags...) - c := internal.NewCobraTestRunnerWithContext(t, ctx, args...) + c := testcli.NewRunner(t, ctx, args...) _, _, err := c.Run() - return err + require.NoError(t, err) } -func runResource(t *testing.T, ctx context.Context, path string, key string) (string, error) { +func runResource(t testutil.TestingT, ctx context.Context, path, key string) (string, error) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) ctx = cmdio.NewContext(ctx, cmdio.Default()) - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "run", key) + c := testcli.NewRunner(t, ctx, "bundle", "run", key) stdout, _, err := c.Run() return stdout.String(), err } -func runResourceWithParams(t *testing.T, ctx context.Context, path string, key string, params ...string) (string, error) { +func runResourceWithParams(t testutil.TestingT, ctx context.Context, path, key string, params ...string) (string, error) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) ctx = cmdio.NewContext(ctx, cmdio.Default()) args := make([]string, 0) args = append(args, "bundle", "run", key) args = append(args, params...) - c := internal.NewCobraTestRunnerWithContext(t, ctx, args...) + c := testcli.NewRunner(t, ctx, args...) stdout, _, err := c.Run() return stdout.String(), err } -func destroyBundle(t *testing.T, ctx context.Context, path string) error { +func destroyBundle(t testutil.TestingT, ctx context.Context, path string) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "destroy", "--auto-approve") + c := testcli.NewRunner(t, ctx, "bundle", "destroy", "--auto-approve") _, _, err := c.Run() - return err + require.NoError(t, err) } -func getBundleRemoteRootPath(w *databricks.WorkspaceClient, t *testing.T, uniqueId string) string { +func getBundleRemoteRootPath(w *databricks.WorkspaceClient, t testutil.TestingT, uniqueId string) string { // Compute root path for the bundle deployment me, err := w.CurrentUser.Me(context.Background()) require.NoError(t, err) @@ -143,16 +146,19 @@ func getBundleRemoteRootPath(w *databricks.WorkspaceClient, t *testing.T, unique return root } -func blackBoxRun(t *testing.T, root string, args ...string) (stdout string, stderr string) { - cwd := vfs.MustNew(".") - gitRoot, err := vfs.FindLeafInTree(cwd, ".git") +func blackBoxRun(t testutil.TestingT, ctx context.Context, root string, args ...string) (stdout, stderr string) { + gitRoot, err := folders.FindDirWithLeaf(".", ".git") require.NoError(t, err) - t.Setenv("BUNDLE_ROOT", root) - // Create the command cmd := exec.Command("go", append([]string{"run", "main.go"}, args...)...) 
- cmd.Dir = gitRoot.Native() + cmd.Dir = gitRoot + + // Configure the environment + ctx = env.Set(ctx, "BUNDLE_ROOT", root) + for key, value := range env.All(ctx) { + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", key, value)) + } // Create buffers to capture output var outBuffer, errBuffer bytes.Buffer diff --git a/integration/bundle/init_default_python_test.go b/integration/bundle/init_default_python_test.go new file mode 100644 index 000000000..9b65636e9 --- /dev/null +++ b/integration/bundle/init_default_python_test.go @@ -0,0 +1,132 @@ +package bundle_test + +import ( + "encoding/json" + "os" + "os/exec" + "path/filepath" + "testing" + + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/python/pythontest" + "github.com/stretchr/testify/require" +) + +var pythonVersions = []string{ + "3.8", + "3.9", + "3.10", + "3.11", + "3.12", + "3.13", +} + +var pythonVersionsShort = []string{ + "3.9", + "3.12", +} + +var extraInstalls = map[string][]string{ + "3.12": {"setuptools"}, + "3.13": {"setuptools"}, +} + +func TestDefaultPython(t *testing.T) { + versions := pythonVersions + if testing.Short() { + versions = pythonVersionsShort + } + + for _, pythonVersion := range versions { + t.Run(pythonVersion, func(t *testing.T) { + testDefaultPython(t, pythonVersion) + }) + } +} + +func testDefaultPython(t *testing.T, pythonVersion string) { + ctx, wt := acc.WorkspaceTest(t) + + uniqueProjectId := testutil.RandomName("") + ctx, replacements := testcli.WithReplacementsMap(ctx) + replacements.Set(uniqueProjectId, "$UNIQUE_PRJ") + + user, err := wt.W.CurrentUser.Me(ctx) + require.NoError(t, err) + require.NotNil(t, user) + testcli.PrepareReplacementsUser(t, replacements, *user) + testcli.PrepareReplacements(t, replacements, wt.W) + + tmpDir := t.TempDir() + testutil.Chdir(t, tmpDir) + + opts := pythontest.VenvOpts{ + PythonVersion: pythonVersion, + Dir: tmpDir, + } + + pythontest.RequireActivatedPythonEnv(t, ctx, &opts) + extras, ok := extraInstalls[pythonVersion] + if ok { + args := append([]string{"pip", "install", "--python", opts.PythonExe}, extras...) + cmd := exec.Command("uv", args...) 
+ require.NoError(t, cmd.Run()) + } + + projectName := "project_name_" + uniqueProjectId + + initConfig := map[string]string{ + "project_name": projectName, + "include_notebook": "yes", + "include_python": "yes", + "include_dlt": "yes", + } + b, err := json.Marshal(initConfig) + require.NoError(t, err) + err = os.WriteFile(filepath.Join(tmpDir, "config.json"), b, 0o644) + require.NoError(t, err) + + testcli.AssertOutput( + t, + ctx, + []string{"bundle", "init", "default-python", "--config-file", "config.json"}, + testutil.TestData("testdata/default_python/bundle_init.txt"), + ) + testutil.Chdir(t, projectName) + + t.Cleanup(func() { + // Delete the stack + testcli.RequireSuccessfulRun(t, ctx, "bundle", "destroy", "--auto-approve") + }) + + testcli.AssertOutput( + t, + ctx, + []string{"bundle", "validate"}, + testutil.TestData("testdata/default_python/bundle_validate.txt"), + ) + testcli.AssertOutput( + t, + ctx, + []string{"bundle", "deploy"}, + testutil.TestData("testdata/default_python/bundle_deploy.txt"), + ) + + testcli.AssertOutputJQ( + t, + ctx, + []string{"bundle", "summary", "--output", "json"}, + testutil.TestData("testdata/default_python/bundle_summary.txt"), + []string{ + "/bundle/terraform/exec_path", + "/resources/jobs/project_name_$UNIQUE_PRJ_job/email_notifications", + "/resources/jobs/project_name_$UNIQUE_PRJ_job/job_clusters/0/new_cluster/node_type_id", + "/resources/jobs/project_name_$UNIQUE_PRJ_job/url", + "/resources/pipelines/project_name_$UNIQUE_PRJ_pipeline/catalog", + "/resources/pipelines/project_name_$UNIQUE_PRJ_pipeline/url", + "/workspace/current_user", + }, + ) +} diff --git a/internal/init_test.go b/integration/bundle/init_test.go similarity index 68% rename from internal/init_test.go rename to integration/bundle/init_test.go index 25bfc19da..f5c263ca3 100644 --- a/internal/init_test.go +++ b/integration/bundle/init_test.go @@ -1,4 +1,4 @@ -package internal +package bundle_test import ( "context" @@ -11,18 +11,18 @@ import ( "testing" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/iamutil" - "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestAccBundleInitErrorOnUnknownFields(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - +func TestBundleInitErrorOnUnknownFields(t *testing.T) { + ctx := context.Background() tmpDir := t.TempDir() - _, _, err := RequireErrorRun(t, "bundle", "init", "./testdata/init/field-does-not-exist", "--output-dir", tmpDir) + _, _, err := testcli.RequireErrorRun(t, ctx, "bundle", "init", "./testdata/init/field-does-not-exist", "--output-dir", tmpDir) assert.EqualError(t, err, "failed to compute file content for bar.tmpl. variable \"does_not_exist\" not defined") } @@ -38,17 +38,15 @@ func TestAccBundleInitErrorOnUnknownFields(t *testing.T) { // 2. While rare and to be avoided if possible, the CLI reserves the right to // make changes that can break the MLOps Stacks DAB. In which case we should // skip this test until the MLOps Stacks DAB is updated to work again. 
-func TestAccBundleInitOnMlopsStacks(t *testing.T) { - t.Parallel() - env := testutil.GetCloud(t).String() +func TestBundleInitOnMlopsStacks(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W tmpDir1 := t.TempDir() tmpDir2 := t.TempDir() - w, err := databricks.NewWorkspaceClient(&databricks.Config{}) - require.NoError(t, err) - - projectName := RandomName("project_name_") + projectName := testutil.RandomName("project_name_") + env := testutil.GetCloud(t).String() // Create a config file with the project name and root dir initConfig := map[string]string{ @@ -59,29 +57,30 @@ func TestAccBundleInitOnMlopsStacks(t *testing.T) { } b, err := json.Marshal(initConfig) require.NoError(t, err) - os.WriteFile(filepath.Join(tmpDir1, "config.json"), b, 0644) + err = os.WriteFile(filepath.Join(tmpDir1, "config.json"), b, 0o644) + require.NoError(t, err) // Run bundle init assert.NoFileExists(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md")) - RequireSuccessfulRun(t, "bundle", "init", "mlops-stacks", "--output-dir", tmpDir2, "--config-file", filepath.Join(tmpDir1, "config.json")) + testcli.RequireSuccessfulRun(t, ctx, "bundle", "init", "mlops-stacks", "--output-dir", tmpDir2, "--config-file", filepath.Join(tmpDir1, "config.json")) // Assert that the README.md file was created - assert.FileExists(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md")) - assertLocalFileContents(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md"), fmt.Sprintf("# %s", projectName)) + contents := testutil.ReadFile(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md")) + assert.Contains(t, contents, fmt.Sprintf("# %s", projectName)) // Validate the stack testutil.Chdir(t, filepath.Join(tmpDir2, "repo_name", projectName)) - RequireSuccessfulRun(t, "bundle", "validate") + testcli.RequireSuccessfulRun(t, ctx, "bundle", "validate") // Deploy the stack - RequireSuccessfulRun(t, "bundle", "deploy") + testcli.RequireSuccessfulRun(t, ctx, "bundle", "deploy") t.Cleanup(func() { // Delete the stack - RequireSuccessfulRun(t, "bundle", "destroy", "--auto-approve") + testcli.RequireSuccessfulRun(t, ctx, "bundle", "destroy", "--auto-approve") }) // Get summary of the bundle deployment - stdout, _ := RequireSuccessfulRun(t, "bundle", "summary", "--output", "json") + stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "bundle", "summary", "--output", "json") summary := &config.Root{} err = json.Unmarshal(stdout.Bytes(), summary) require.NoError(t, err) @@ -100,24 +99,23 @@ func TestAccBundleInitOnMlopsStacks(t *testing.T) { assert.Contains(t, job.Settings.Name, fmt.Sprintf("dev-%s-batch-inference-job", projectName)) } -func TestAccBundleInitHelpers(t *testing.T) { - env := GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) +func TestBundleInitHelpers(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W - w, err := databricks.NewWorkspaceClient(&databricks.Config{}) - require.NoError(t, err) - - me, err := w.CurrentUser.Me(context.Background()) + me, err := w.CurrentUser.Me(ctx) require.NoError(t, err) var smallestNode string - switch env { - case "azure": + switch testutil.GetCloud(t) { + case testutil.Azure: smallestNode = "Standard_D3_v2" - case "gcp": + case testutil.GCP: smallestNode = "n1-standard-4" - default: + case testutil.AWS: smallestNode = "i3.xlarge" + default: + t.Fatal("Unknown cloud environment") } tests := []struct { @@ -151,17 +149,18 @@ func TestAccBundleInitHelpers(t *testing.T) { tmpDir := t.TempDir() tmpDir2 := t.TempDir() - err := 
os.Mkdir(filepath.Join(tmpDir, "template"), 0755) + err := os.Mkdir(filepath.Join(tmpDir, "template"), 0o755) require.NoError(t, err) - err = os.WriteFile(filepath.Join(tmpDir, "template", "foo.txt.tmpl"), []byte(test.funcName), 0644) + err = os.WriteFile(filepath.Join(tmpDir, "template", "foo.txt.tmpl"), []byte(test.funcName), 0o644) require.NoError(t, err) - err = os.WriteFile(filepath.Join(tmpDir, "databricks_template_schema.json"), []byte("{}"), 0644) + err = os.WriteFile(filepath.Join(tmpDir, "databricks_template_schema.json"), []byte("{}"), 0o644) require.NoError(t, err) // Run bundle init. - RequireSuccessfulRun(t, "bundle", "init", tmpDir, "--output-dir", tmpDir2) + testcli.RequireSuccessfulRun(t, ctx, "bundle", "init", tmpDir, "--output-dir", tmpDir2) // Assert that the helper function was correctly computed. - assertLocalFileContents(t, filepath.Join(tmpDir2, "foo.txt"), test.expected) + contents := testutil.ReadFile(t, filepath.Join(tmpDir2, "foo.txt")) + assert.Contains(t, contents, test.expected) } } diff --git a/internal/bundle/job_metadata_test.go b/integration/bundle/job_metadata_test.go similarity index 82% rename from internal/bundle/job_metadata_test.go rename to integration/bundle/job_metadata_test.go index 21f1086ae..a7290c6e3 100644 --- a/internal/bundle/job_metadata_test.go +++ b/integration/bundle/job_metadata_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "context" @@ -10,36 +10,32 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/metadata" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" - "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestAccJobsMetadataFile(t *testing.T) { +func TestJobsMetadataFile(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "job_metadata", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "job_metadata", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, }) - require.NoError(t, err) // deploy bundle - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) // Cleanup the deployed bundle t.Cleanup(func() { - err = destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) }) // assert job 1 is created diff --git a/internal/bundle/local_state_staleness_test.go b/integration/bundle/local_state_staleness_test.go similarity index 68% rename from internal/bundle/local_state_staleness_test.go rename to integration/bundle/local_state_staleness_test.go index d11234667..398481504 100644 --- a/internal/bundle/local_state_staleness_test.go +++ b/integration/bundle/local_state_staleness_test.go @@ -1,12 +1,11 @@ -package bundle +package bundle_test import ( "context" "testing" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" - "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/listing" 
"github.com/databricks/databricks-sdk-go/service/jobs" "github.com/google/uuid" @@ -14,7 +13,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccLocalStateStaleness(t *testing.T) { +func TestLocalStateStaleness(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W @@ -25,19 +24,17 @@ func TestAccLocalStateStaleness(t *testing.T) { // Because of deploy (2), the locally cached state of bundle instance A should be stale. // Then for deploy (3), it must use the remote state over the stale local state. - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() initialize := func() string { - root, err := initTestTemplate(t, ctx, "basic", map[string]any{ + root := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, }) - require.NoError(t, err) t.Cleanup(func() { - err = destroyBundle(t, ctx, root) - require.NoError(t, err) + destroyBundle(t, ctx, root) }) return root @@ -49,16 +46,13 @@ func TestAccLocalStateStaleness(t *testing.T) { bundleB := initialize() // 1) Deploy bundle A - err = deployBundle(t, ctx, bundleA) - require.NoError(t, err) + deployBundle(t, ctx, bundleA) // 2) Deploy bundle B - err = deployBundle(t, ctx, bundleB) - require.NoError(t, err) + deployBundle(t, ctx, bundleB) // 3) Deploy bundle A again - err = deployBundle(t, ctx, bundleA) - require.NoError(t, err) + deployBundle(t, ctx, bundleA) // Assert that there is only a single job in the workspace corresponding to this bundle. iter := w.Jobs.List(context.Background(), jobs.ListJobsRequest{ diff --git a/integration/bundle/main_test.go b/integration/bundle/main_test.go new file mode 100644 index 000000000..1c44d0aaf --- /dev/null +++ b/integration/bundle/main_test.go @@ -0,0 +1,13 @@ +package bundle_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. 
+func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/bundle/python_wheel_test.go b/integration/bundle/python_wheel_test.go similarity index 66% rename from internal/bundle/python_wheel_test.go rename to integration/bundle/python_wheel_test.go index 846f14177..62846f7b5 100644 --- a/internal/bundle/python_wheel_test.go +++ b/integration/bundle/python_wheel_test.go @@ -1,37 +1,39 @@ -package bundle +package bundle_test import ( "testing" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/env" "github.com/google/uuid" "github.com/stretchr/testify/require" ) -func runPythonWheelTest(t *testing.T, templateName string, sparkVersion string, pythonWheelWrapper bool) { +func runPythonWheelTest(t *testing.T, templateName, sparkVersion string, pythonWheelWrapper bool) { ctx, _ := acc.WorkspaceTest(t) - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() instancePoolId := env.Get(ctx, "TEST_INSTANCE_POOL_ID") - bundleRoot, err := initTestTemplate(t, ctx, templateName, map[string]any{ + bundleRoot := initTestTemplate(t, ctx, templateName, map[string]any{ "node_type_id": nodeTypeId, "unique_id": uuid.New().String(), "spark_version": sparkVersion, "python_wheel_wrapper": pythonWheelWrapper, "instance_pool_id": instancePoolId, }) - require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) t.Cleanup(func() { destroyBundle(t, ctx, bundleRoot) }) + if testing.Short() { + t.Log("Skip the job run in short mode") + return + } + out, err := runResource(t, ctx, bundleRoot, "some_other_job") require.NoError(t, err) require.Contains(t, out, "Hello from my func") @@ -45,18 +47,16 @@ func runPythonWheelTest(t *testing.T, templateName string, sparkVersion string, require.Contains(t, out, "['my_test_code', 'param1', 'param2']") } -func TestAccPythonWheelTaskDeployAndRunWithoutWrapper(t *testing.T) { +func TestPythonWheelTaskDeployAndRunWithoutWrapper(t *testing.T) { runPythonWheelTest(t, "python_wheel_task", "13.3.x-snapshot-scala2.12", false) } -func TestAccPythonWheelTaskDeployAndRunWithWrapper(t *testing.T) { +func TestPythonWheelTaskDeployAndRunWithWrapper(t *testing.T) { runPythonWheelTest(t, "python_wheel_task", "12.2.x-scala2.12", true) } -func TestAccPythonWheelTaskDeployAndRunOnInteractiveCluster(t *testing.T) { - _, wt := acc.WorkspaceTest(t) - - if testutil.IsAWSCloud(wt.T) { +func TestPythonWheelTaskDeployAndRunOnInteractiveCluster(t *testing.T) { + if testutil.GetCloud(t) == testutil.AWS { t.Skip("Skipping test for AWS cloud because it is not permitted to create clusters") } diff --git a/internal/bundle/spark_jar_test.go b/integration/bundle/spark_jar_test.go similarity index 78% rename from internal/bundle/spark_jar_test.go rename to integration/bundle/spark_jar_test.go index 4b469617c..cbdf5a00c 100644 --- a/internal/bundle/spark_jar_test.go +++ b/integration/bundle/spark_jar_test.go @@ -1,23 +1,21 @@ -package bundle +package bundle_test import ( "context" "testing" - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/env" "github.com/google/uuid" "github.com/stretchr/testify/require" ) -func runSparkJarTestCommon(t *testing.T, ctx 
context.Context, sparkVersion string, artifactPath string) { - cloudEnv := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - nodeTypeId := internal.GetNodeTypeId(cloudEnv) +func runSparkJarTestCommon(t *testing.T, ctx context.Context, sparkVersion, artifactPath string) { + nodeTypeId := testutil.GetCloud(t).NodeTypeID() tmpDir := t.TempDir() instancePoolId := env.Get(ctx, "TEST_INSTANCE_POOL_ID") - bundleRoot, err := initTestTemplateWithBundleRoot(t, ctx, "spark_jar_task", map[string]any{ + bundleRoot := initTestTemplateWithBundleRoot(t, ctx, "spark_jar_task", map[string]any{ "node_type_id": nodeTypeId, "unique_id": uuid.New().String(), "spark_version": sparkVersion, @@ -25,15 +23,18 @@ func runSparkJarTestCommon(t *testing.T, ctx context.Context, sparkVersion strin "artifact_path": artifactPath, "instance_pool_id": instancePoolId, }, tmpDir) - require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) t.Cleanup(func() { destroyBundle(t, ctx, bundleRoot) }) + if testing.Short() { + t.Log("Skip the job run in short mode") + return + } + out, err := runResource(t, ctx, bundleRoot, "jar_job") require.NoError(t, err) require.Contains(t, out, "Hello from Jar!") @@ -41,7 +42,7 @@ func runSparkJarTestCommon(t *testing.T, ctx context.Context, sparkVersion strin func runSparkJarTestFromVolume(t *testing.T, sparkVersion string) { ctx, wt := acc.UcWorkspaceTest(t) - volumePath := internal.TemporaryUcVolume(t, wt.W) + volumePath := acc.TemporaryVolume(wt) ctx = env.Set(ctx, "DATABRICKS_BUNDLE_TARGET", "volume") runSparkJarTestCommon(t, ctx, sparkVersion, volumePath) } @@ -52,8 +53,7 @@ func runSparkJarTestFromWorkspace(t *testing.T, sparkVersion string) { runSparkJarTestCommon(t, ctx, sparkVersion, "n/a") } -func TestAccSparkJarTaskDeployAndRunOnVolumes(t *testing.T) { - internal.GetEnvOrSkipTest(t, "CLOUD_ENV") +func TestSparkJarTaskDeployAndRunOnVolumes(t *testing.T) { testutil.RequireJDK(t, context.Background(), "1.8.0") // Failure on earlier DBR versions: @@ -76,8 +76,7 @@ func TestAccSparkJarTaskDeployAndRunOnVolumes(t *testing.T) { } } -func TestAccSparkJarTaskDeployAndRunOnWorkspace(t *testing.T) { - internal.GetEnvOrSkipTest(t, "CLOUD_ENV") +func TestSparkJarTaskDeployAndRunOnWorkspace(t *testing.T) { testutil.RequireJDK(t, context.Background(), "1.8.0") // Failure on earlier DBR versions: diff --git a/integration/bundle/testdata/default_python/bundle_deploy.txt b/integration/bundle/testdata/default_python/bundle_deploy.txt new file mode 100644 index 000000000..eef0b79b3 --- /dev/null +++ b/integration/bundle/testdata/default_python/bundle_deploy.txt @@ -0,0 +1,6 @@ +Building project_name_$UNIQUE_PRJ... +Uploading project_name_$UNIQUE_PRJ-0.0.1+.-py3-none-any.whl... +Uploading bundle files to /Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/files... +Deploying resources... +Updating deployment state... +Deployment complete! diff --git a/integration/bundle/testdata/default_python/bundle_init.txt b/integration/bundle/testdata/default_python/bundle_init.txt new file mode 100644 index 000000000..6cfc32f98 --- /dev/null +++ b/integration/bundle/testdata/default_python/bundle_init.txt @@ -0,0 +1,8 @@ + +Welcome to the default Python template for Databricks Asset Bundles! +Workspace to use (auto-detected, edit in 'project_name_$UNIQUE_PRJ/databricks.yml'): https://$DATABRICKS_HOST + +✨ Your new project has been created in the 'project_name_$UNIQUE_PRJ' directory! 
+ +Please refer to the README.md file for "getting started" instructions. +See also the documentation at https://docs.databricks.com/dev-tools/bundles/index.html. diff --git a/integration/bundle/testdata/default_python/bundle_summary.txt b/integration/bundle/testdata/default_python/bundle_summary.txt new file mode 100644 index 000000000..3143d729c --- /dev/null +++ b/integration/bundle/testdata/default_python/bundle_summary.txt @@ -0,0 +1,185 @@ +{ + "bundle": { + "name": "project_name_$UNIQUE_PRJ", + "target": "dev", + "environment": "dev", + "terraform": { + "exec_path": "/tmp/.../terraform" + }, + "git": { + "bundle_root_path": ".", + "inferred": true + }, + "mode": "development", + "deployment": { + "lock": { + "enabled": false + } + } + }, + "include": [ + "resources/project_name_$UNIQUE_PRJ.job.yml", + "resources/project_name_$UNIQUE_PRJ.pipeline.yml" + ], + "workspace": { + "host": "https://$DATABRICKS_HOST", + "current_user": { + "active": true, + "displayName": "$USERNAME", + "emails": [ + { + "primary": true, + "type": "work", + "value": "$USERNAME" + } + ], + "groups": [ + { + "$ref": "Groups/$USER.Groups[0]", + "display": "team.engineering", + "type": "direct", + "value": "$USER.Groups[0]" + } + ], + "id": "$USER.Id", + "name": { + "familyName": "$USERNAME", + "givenName": "$USERNAME" + }, + "schemas": [ + "urn:ietf:params:scim:schemas:core:2.0:User", + "urn:ietf:params:scim:schemas:extension:workspace:2.0:User" + ], + "short_name": "$USERNAME", + "userName": "$USERNAME" + }, + "root_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev", + "file_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/files", + "resource_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/resources", + "artifact_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/artifacts", + "state_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/state" + }, + "resources": { + "jobs": { + "project_name_$UNIQUE_PRJ_job": { + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/state/metadata.json" + }, + "edit_mode": "UI_LOCKED", + "email_notifications": { + "on_failure": [ + "$USERNAME" + ] + }, + "format": "MULTI_TASK", + "id": "", + "job_clusters": [ + { + "job_cluster_key": "job_cluster", + "new_cluster": { + "autoscale": { + "max_workers": 4, + "min_workers": 1 + }, + "node_type_id": "i3.xlarge", + "spark_version": "15.4.x-scala2.12" + } + } + ], + "max_concurrent_runs": 4, + "name": "[dev $USERNAME] project_name_$UNIQUE_PRJ_job", + "queue": { + "enabled": true + }, + "tags": { + "dev": "$USERNAME" + }, + "tasks": [ + { + "job_cluster_key": "job_cluster", + "notebook_task": { + "notebook_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/files/src/notebook" + }, + "task_key": "notebook_task" + }, + { + "depends_on": [ + { + "task_key": "notebook_task" + } + ], + "pipeline_task": { + "pipeline_id": "${resources.pipelines.project_name_$UNIQUE_PRJ_pipeline.id}" + }, + "task_key": "refresh_pipeline" + }, + { + "depends_on": [ + { + "task_key": "refresh_pipeline" + } + ], + "job_cluster_key": "job_cluster", + "libraries": [ + { + "whl": "dist/*.whl" + } + ], + "python_wheel_task": { + "entry_point": "main", + "package_name": "project_name_$UNIQUE_PRJ" + }, + "task_key": "main_task" + } + ], + "trigger": { + "pause_status": "PAUSED", + "periodic": { + "interval": 1, + "unit": "DAYS" + } + }, + "url": 
"https://$DATABRICKS_HOST/jobs/?o=" + } + }, + "pipelines": { + "project_name_$UNIQUE_PRJ_pipeline": { + "catalog": "main", + "configuration": { + "bundle.sourcePath": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/files/src" + }, + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/state/metadata.json" + }, + "development": true, + "id": "", + "libraries": [ + { + "notebook": { + "path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/files/src/dlt_pipeline" + } + } + ], + "name": "[dev $USERNAME] project_name_$UNIQUE_PRJ_pipeline", + "target": "project_name_$UNIQUE_PRJ_dev", + "url": "https://$DATABRICKS_HOST/pipelines/?o=" + } + } + }, + "sync": { + "paths": [ + "." + ] + }, + "presets": { + "name_prefix": "[dev $USERNAME] ", + "pipelines_development": true, + "trigger_pause_status": "PAUSED", + "jobs_max_concurrent_runs": 4, + "tags": { + "dev": "$USERNAME" + } + } +} \ No newline at end of file diff --git a/integration/bundle/testdata/default_python/bundle_validate.txt b/integration/bundle/testdata/default_python/bundle_validate.txt new file mode 100644 index 000000000..88a5fdd18 --- /dev/null +++ b/integration/bundle/testdata/default_python/bundle_validate.txt @@ -0,0 +1,8 @@ +Name: project_name_$UNIQUE_PRJ +Target: dev +Workspace: + Host: https://$DATABRICKS_HOST + User: $USERNAME + Path: /Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev + +Validation OK! diff --git a/internal/testdata/init/field-does-not-exist/databricks_template_schema.json b/integration/bundle/testdata/init/field-does-not-exist/databricks_template_schema.json similarity index 100% rename from internal/testdata/init/field-does-not-exist/databricks_template_schema.json rename to integration/bundle/testdata/init/field-does-not-exist/databricks_template_schema.json diff --git a/internal/testdata/init/field-does-not-exist/template/bar.tmpl b/integration/bundle/testdata/init/field-does-not-exist/template/bar.tmpl similarity index 100% rename from internal/testdata/init/field-does-not-exist/template/bar.tmpl rename to integration/bundle/testdata/init/field-does-not-exist/template/bar.tmpl diff --git a/internal/bundle/validate_test.go b/integration/bundle/validate_test.go similarity index 90% rename from internal/bundle/validate_test.go rename to integration/bundle/validate_test.go index 18da89e4c..2dd8ada67 100644 --- a/internal/bundle/validate_test.go +++ b/integration/bundle/validate_test.go @@ -1,8 +1,9 @@ -package bundle +package bundle_test import ( "context" "encoding/json" + "path/filepath" "testing" "github.com/databricks/cli/internal/testutil" @@ -12,11 +13,9 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccBundleValidate(t *testing.T) { - testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") - +func TestBundleValidate(t *testing.T) { tmpDir := t.TempDir() - testutil.WriteFile(t, + testutil.WriteFile(t, filepath.Join(tmpDir, "databricks.yml"), ` bundle: name: "foobar" @@ -33,7 +32,7 @@ resources: inner_loop: name: inner loop -`, tmpDir, "databricks.yml") +`) ctx := context.Background() stdout, err := validateBundle(t, ctx, tmpDir) diff --git a/integration/cmd/alerts/alerts_test.go b/integration/cmd/alerts/alerts_test.go new file mode 100644 index 000000000..ca1719813 --- /dev/null +++ b/integration/cmd/alerts/alerts_test.go @@ -0,0 +1,15 @@ +package alerts_test + +import ( + "context" + "testing" + + "github.com/databricks/cli/internal/testcli" + "github.com/stretchr/testify/assert" +) 
+
+func TestAlertsCreateErrWhenNoArguments(t *testing.T) {
+	ctx := context.Background()
+	_, _, err := testcli.RequireErrorRun(t, ctx, "alerts-legacy", "create")
+	assert.Equal(t, "please provide command input in JSON format by specifying the --json flag", err.Error())
+}
diff --git a/integration/cmd/alerts/main_test.go b/integration/cmd/alerts/main_test.go
new file mode 100644
index 000000000..6987ade02
--- /dev/null
+++ b/integration/cmd/alerts/main_test.go
@@ -0,0 +1,13 @@
+package alerts_test
+
+import (
+	"testing"
+
+	"github.com/databricks/cli/integration/internal"
+)
+
+// TestMain is the entrypoint executed by the test runner.
+// See [internal.Main] for prerequisites for running integration tests.
+func TestMain(m *testing.M) {
+	internal.Main(m)
+}
diff --git a/integration/cmd/api/api_test.go b/integration/cmd/api/api_test.go
new file mode 100644
index 000000000..4cb9b1737
--- /dev/null
+++ b/integration/cmd/api/api_test.go
@@ -0,0 +1,56 @@
+package api_test
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"path"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	_ "github.com/databricks/cli/cmd/api"
+	"github.com/databricks/cli/internal/testcli"
+	"github.com/databricks/cli/internal/testutil"
+)
+
+func TestApiGet(t *testing.T) {
+	ctx := context.Background()
+
+	stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "api", "get", "/api/2.0/preview/scim/v2/Me")
+
+	// Deserialize SCIM API response.
+	var out map[string]any
+	err := json.Unmarshal(stdout.Bytes(), &out)
+	require.NoError(t, err)
+
+	// Assert that the output somewhat makes sense for the SCIM API.
+	assert.Equal(t, true, out["active"])
+	assert.NotNil(t, out["id"])
+}
+
+func TestApiPost(t *testing.T) {
+	ctx := context.Background()
+
+	if testutil.GetCloud(t) == testutil.GCP {
+		t.Skip("DBFS REST API is disabled on gcp")
+	}
+
+	dbfsPath := path.Join("/tmp/databricks/integration", testutil.RandomName("api-post"))
+	requestPath := filepath.Join(t.TempDir(), "body.json")
+	testutil.WriteFile(t, requestPath, fmt.Sprintf(`{
+		"path": "%s"
+	}`, dbfsPath))
+
+	// Post to mkdir
+	{
+		testcli.RequireSuccessfulRun(t, ctx, "api", "post", "--json=@"+requestPath, "/api/2.0/dbfs/mkdirs")
+	}
+
+	// Post to delete
+	{
+		testcli.RequireSuccessfulRun(t, ctx, "api", "post", "--json=@"+requestPath, "/api/2.0/dbfs/delete")
+	}
+}
diff --git a/integration/cmd/api/main_test.go b/integration/cmd/api/main_test.go
new file mode 100644
index 000000000..70d021790
--- /dev/null
+++ b/integration/cmd/api/main_test.go
@@ -0,0 +1,13 @@
+package api_test
+
+import (
+	"testing"
+
+	"github.com/databricks/cli/integration/internal"
+)
+
+// TestMain is the entrypoint executed by the test runner.
+// See [internal.Main] for prerequisites for running integration tests.
+func TestMain(m *testing.M) {
+	internal.Main(m)
+}
diff --git a/internal/auth_describe_test.go b/integration/cmd/auth/describe_test.go
similarity index 74%
rename from internal/auth_describe_test.go
rename to integration/cmd/auth/describe_test.go
index 90b5d6801..41288dce6 100644
--- a/internal/auth_describe_test.go
+++ b/integration/cmd/auth/describe_test.go
@@ -1,18 +1,20 @@
-package internal
+package auth_test
 
 import (
 	"context"
 	"fmt"
 	"testing"
 
+	"github.com/databricks/cli/internal/testcli"
 	"github.com/databricks/databricks-sdk-go"
 	"github.com/stretchr/testify/require"
 )
 
 func TestAuthDescribeSuccess(t *testing.T) {
-	t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
+	t.Skipf("Skipping because of https://github.com/databricks/cli/issues/2010")
 
-	stdout, _ := RequireSuccessfulRun(t, "auth", "describe")
+	ctx := context.Background()
+	stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "auth", "describe")
 	outStr := stdout.String()
 
 	w, err := databricks.NewWorkspaceClient(&databricks.Config{})
@@ -31,9 +33,10 @@ func TestAuthDescribeSuccess(t *testing.T) {
 }
 
 func TestAuthDescribeFailure(t *testing.T) {
-	t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
+	t.Skipf("Skipping because of https://github.com/databricks/cli/issues/2010")
 
-	stdout, _ := RequireSuccessfulRun(t, "auth", "describe", "--profile", "nonexistent")
+	ctx := context.Background()
+	stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "auth", "describe", "--profile", "nonexistent")
 	outStr := stdout.String()
 
 	require.NotEmpty(t, outStr)
diff --git a/integration/cmd/auth/main_test.go b/integration/cmd/auth/main_test.go
new file mode 100644
index 000000000..97b1d740b
--- /dev/null
+++ b/integration/cmd/auth/main_test.go
@@ -0,0 +1,13 @@
+package auth_test
+
+import (
+	"testing"
+
+	"github.com/databricks/cli/integration/internal"
+)
+
+// TestMain is the entrypoint executed by the test runner.
+// See [internal.Main] for prerequisites for running integration tests.
+func TestMain(m *testing.M) {
+	internal.Main(m)
+}
diff --git a/integration/cmd/clusters/clusters_test.go b/integration/cmd/clusters/clusters_test.go
new file mode 100644
index 000000000..4e20a0558
--- /dev/null
+++ b/integration/cmd/clusters/clusters_test.go
@@ -0,0 +1,63 @@
+package clusters_test
+
+import (
+	"context"
+	"fmt"
+	"regexp"
+	"testing"
+
+	"github.com/databricks/cli/integration/internal/acc"
+	"github.com/databricks/cli/internal/testcli"
+	"github.com/databricks/databricks-sdk-go/listing"
+	"github.com/databricks/databricks-sdk-go/service/compute"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestClustersList(t *testing.T) {
+	ctx := context.Background()
+	stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "clusters", "list")
+	outStr := stdout.String()
+	assert.Contains(t, outStr, "ID")
+	assert.Contains(t, outStr, "Name")
+	assert.Contains(t, outStr, "State")
+	assert.Equal(t, "", stderr.String())
+
+	idRegExp := regexp.MustCompile(`[0-9]{4}\-[0-9]{6}-[a-z0-9]{8}`)
+	clusterId := idRegExp.FindString(outStr)
+	assert.NotEmpty(t, clusterId)
+}
+
+func TestClustersGet(t *testing.T) {
+	ctx := context.Background()
+	clusterId := findValidClusterID(t)
+	stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "clusters", "get", clusterId)
+	outStr := stdout.String()
+	assert.Contains(t, outStr, fmt.Sprintf(`"cluster_id":"%s"`, clusterId))
+	assert.Equal(t, "", stderr.String())
+}
+
+func TestClusterCreateErrorWhenNoArguments(t *testing.T) {
+	ctx := context.Background()
+	_, _, err := testcli.RequireErrorRun(t, ctx, "clusters", "create")
+	assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0")
+}
+
+// findValidClusterID lists clusters in the workspace to find a valid cluster ID.
+func findValidClusterID(t *testing.T) string {
+	ctx, wt := acc.WorkspaceTest(t)
+	it := wt.W.Clusters.List(ctx, compute.ListClustersRequest{
+		FilterBy: &compute.ListClustersFilterBy{
+			ClusterSources: []compute.ClusterSource{
+				compute.ClusterSourceApi,
+				compute.ClusterSourceUi,
+			},
+		},
+	})
+
+	clusterIDs, err := listing.ToSliceN(ctx, it, 1)
+	require.NoError(t, err)
+	require.Len(t, clusterIDs, 1)
+
+	return clusterIDs[0].ClusterId
+}
diff --git a/integration/cmd/clusters/main_test.go b/integration/cmd/clusters/main_test.go
new file mode 100644
index 000000000..ccd5660e7
--- /dev/null
+++ b/integration/cmd/clusters/main_test.go
@@ -0,0 +1,13 @@
+package clusters_test
+
+import (
+	"testing"
+
+	"github.com/databricks/cli/integration/internal"
+)
+
+// TestMain is the entrypoint executed by the test runner.
+// See [internal.Main] for prerequisites for running integration tests.
+func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/fs_cat_test.go b/integration/cmd/fs/cat_test.go similarity index 57% rename from internal/fs_cat_test.go rename to integration/cmd/fs/cat_test.go index 6292aef18..3e964fe6e 100644 --- a/internal/fs_cat_test.go +++ b/integration/cmd/fs/cat_test.go @@ -1,4 +1,4 @@ -package internal +package fs_test import ( "context" @@ -7,13 +7,14 @@ import ( "strings" "testing" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestAccFsCat(t *testing.T) { +func TestFsCat(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -22,18 +23,20 @@ func TestAccFsCat(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := tc.setupFiler(t) + err := f.Write(context.Background(), "hello.txt", strings.NewReader("abcd"), filer.CreateParentDirectories) require.NoError(t, err) - stdout, stderr := RequireSuccessfulRun(t, "fs", "cat", path.Join(tmpDir, "hello.txt")) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "cat", path.Join(tmpDir, "hello.txt")) assert.Equal(t, "", stderr.String()) assert.Equal(t, "abcd", stdout.String()) }) } } -func TestAccFsCatOnADir(t *testing.T) { +func TestFsCatOnADir(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -42,17 +45,19 @@ func TestAccFsCatOnADir(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := tc.setupFiler(t) + err := f.Mkdir(context.Background(), "dir1") require.NoError(t, err) - _, _, err = RequireErrorRun(t, "fs", "cat", path.Join(tmpDir, "dir1")) + _, _, err = testcli.RequireErrorRun(t, ctx, "fs", "cat", path.Join(tmpDir, "dir1")) assert.ErrorAs(t, err, &filer.NotAFile{}) }) } } -func TestAccFsCatOnNonExistentFile(t *testing.T) { +func TestFsCatOnNonExistentFile(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -61,36 +66,32 @@ func TestAccFsCatOnNonExistentFile(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() _, tmpDir := tc.setupFiler(t) - _, _, err := RequireErrorRun(t, "fs", "cat", path.Join(tmpDir, "non-existent-file")) + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "cat", path.Join(tmpDir, "non-existent-file")) assert.ErrorIs(t, err, fs.ErrNotExist) }) } } -func TestAccFsCatForDbfsInvalidScheme(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - _, _, err := RequireErrorRun(t, "fs", "cat", "dab:/non-existent-file") +func TestFsCatForDbfsInvalidScheme(t *testing.T) { + ctx := context.Background() + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "cat", "dab:/non-existent-file") assert.ErrorContains(t, err, "invalid scheme: dab") } -func TestAccFsCatDoesNotSupportOutputModeJson(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - tmpDir := TemporaryDbfsDir(t, w) +func TestFsCatDoesNotSupportOutputModeJson(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W + tmpDir := acc.TemporaryDbfsDir(wt, "fs-cat-") f, err := filer.NewDbfsClient(w, tmpDir) require.NoError(t, err) err = f.Write(ctx, "hello.txt", strings.NewReader("abc")) require.NoError(t, err) - _, _, err = RequireErrorRun(t, "fs", "cat", "dbfs:"+path.Join(tmpDir, "hello.txt"), 
"--output=json") + _, _, err = testcli.RequireErrorRun(t, ctx, "fs", "cat", "dbfs:"+path.Join(tmpDir, "hello.txt"), "--output=json") assert.ErrorContains(t, err, "json output not supported") } diff --git a/internal/completer_test.go b/integration/cmd/fs/completion_test.go similarity index 72% rename from internal/completer_test.go rename to integration/cmd/fs/completion_test.go index b2c936886..88ce2fcc1 100644 --- a/internal/completer_test.go +++ b/integration/cmd/fs/completion_test.go @@ -1,4 +1,4 @@ -package internal +package fs_test import ( "context" @@ -7,6 +7,7 @@ import ( "testing" _ "github.com/databricks/cli/cmd/fs" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -17,11 +18,12 @@ func setupCompletionFile(t *testing.T, f filer.Filer) { require.NoError(t, err) } -func TestAccFsCompletion(t *testing.T) { +func TestFsCompletion(t *testing.T) { + ctx := context.Background() f, tmpDir := setupDbfsFiler(t) setupCompletionFile(t, f) - stdout, _ := RequireSuccessfulRun(t, "__complete", "fs", "ls", tmpDir+"/") + stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "__complete", "fs", "ls", tmpDir+"/") expectedOutput := fmt.Sprintf("%s/dir1/\n:2\n", tmpDir) assert.Equal(t, expectedOutput, stdout.String()) } diff --git a/internal/fs_cp_test.go b/integration/cmd/fs/cp_test.go similarity index 76% rename from internal/fs_cp_test.go rename to integration/cmd/fs/cp_test.go index b69735bc0..76aef7acf 100644 --- a/internal/fs_cp_test.go +++ b/integration/cmd/fs/cp_test.go @@ -1,4 +1,4 @@ -package internal +package fs_test import ( "context" @@ -10,6 +10,8 @@ import ( "strings" "testing" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -61,8 +63,8 @@ func assertTargetDir(t *testing.T, ctx context.Context, f filer.Filer) { type cpTest struct { name string - setupSource func(*testing.T) (filer.Filer, string) - setupTarget func(*testing.T) (filer.Filer, string) + setupSource func(testutil.TestingT) (filer.Filer, string) + setupTarget func(testutil.TestingT) (filer.Filer, string) } func copyTests() []cpTest { @@ -120,7 +122,7 @@ func copyTests() []cpTest { } } -func TestAccFsCpDir(t *testing.T) { +func TestFsCpDir(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -129,18 +131,19 @@ func TestAccFsCpDir(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), sourceFiler) - RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive") + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", sourceDir, targetDir, "--recursive") assertTargetDir(t, context.Background(), targetFiler) }) } } -func TestAccFsCpFileToFile(t *testing.T) { +func TestFsCpFileToFile(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -149,18 +152,19 @@ func TestAccFsCpFileToFile(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceFile(t, context.Background(), sourceFiler) - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "foo.txt"), path.Join(targetDir, "bar.txt")) + testcli.RequireSuccessfulRun(t, ctx, 
"fs", "cp", path.Join(sourceDir, "foo.txt"), path.Join(targetDir, "bar.txt")) assertTargetFile(t, context.Background(), targetFiler, "bar.txt") }) } } -func TestAccFsCpFileToDir(t *testing.T) { +func TestFsCpFileToDir(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -169,18 +173,19 @@ func TestAccFsCpFileToDir(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceFile(t, context.Background(), sourceFiler) - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "foo.txt"), targetDir) + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", path.Join(sourceDir, "foo.txt"), targetDir) assertTargetFile(t, context.Background(), targetFiler, "foo.txt") }) } } -func TestAccFsCpFileToDirForWindowsPaths(t *testing.T) { +func TestFsCpFileToDirForWindowsPaths(t *testing.T) { if runtime.GOOS != "windows" { t.Skip("Skipping test on non-windows OS") } @@ -192,11 +197,11 @@ func TestAccFsCpFileToDirForWindowsPaths(t *testing.T) { windowsPath := filepath.Join(filepath.FromSlash(sourceDir), "foo.txt") - RequireSuccessfulRun(t, "fs", "cp", windowsPath, targetDir) + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", windowsPath, targetDir) assertTargetFile(t, ctx, targetFiler, "foo.txt") } -func TestAccFsCpDirToDirFileNotOverwritten(t *testing.T) { +func TestFsCpDirToDirFileNotOverwritten(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -205,6 +210,7 @@ func TestAccFsCpDirToDirFileNotOverwritten(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), sourceFiler) @@ -213,7 +219,7 @@ func TestAccFsCpDirToDirFileNotOverwritten(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) require.NoError(t, err) - RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive") + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", sourceDir, targetDir, "--recursive") assertFileContent(t, context.Background(), targetFiler, "a/b/c/hello.txt", "this should not be overwritten") assertFileContent(t, context.Background(), targetFiler, "query.sql", "SELECT 1") assertFileContent(t, context.Background(), targetFiler, "pyNb.py", "# Databricks notebook source\nprint(123)") @@ -221,7 +227,7 @@ func TestAccFsCpDirToDirFileNotOverwritten(t *testing.T) { } } -func TestAccFsCpFileToDirFileNotOverwritten(t *testing.T) { +func TestFsCpFileToDirFileNotOverwritten(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -230,6 +236,7 @@ func TestAccFsCpFileToDirFileNotOverwritten(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), sourceFiler) @@ -238,13 +245,13 @@ func TestAccFsCpFileToDirFileNotOverwritten(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) require.NoError(t, err) - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c")) + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), 
path.Join(targetDir, "a/b/c")) assertFileContent(t, context.Background(), targetFiler, "a/b/c/hello.txt", "this should not be overwritten") }) } } -func TestAccFsCpFileToFileFileNotOverwritten(t *testing.T) { +func TestFsCpFileToFileFileNotOverwritten(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -253,6 +260,7 @@ func TestAccFsCpFileToFileFileNotOverwritten(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), sourceFiler) @@ -261,13 +269,13 @@ func TestAccFsCpFileToFileFileNotOverwritten(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/dontoverwrite.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) require.NoError(t, err) - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/dontoverwrite.txt")) + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/dontoverwrite.txt")) assertFileContent(t, context.Background(), targetFiler, "a/b/c/dontoverwrite.txt", "this should not be overwritten") }) } } -func TestAccFsCpDirToDirWithOverwriteFlag(t *testing.T) { +func TestFsCpDirToDirWithOverwriteFlag(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -276,6 +284,7 @@ func TestAccFsCpDirToDirWithOverwriteFlag(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), sourceFiler) @@ -284,13 +293,13 @@ func TestAccFsCpDirToDirWithOverwriteFlag(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should be overwritten"), filer.CreateParentDirectories) require.NoError(t, err) - RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive", "--overwrite") + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", sourceDir, targetDir, "--recursive", "--overwrite") assertTargetDir(t, context.Background(), targetFiler) }) } } -func TestAccFsCpFileToFileWithOverwriteFlag(t *testing.T) { +func TestFsCpFileToFileWithOverwriteFlag(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -299,6 +308,7 @@ func TestAccFsCpFileToFileWithOverwriteFlag(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), sourceFiler) @@ -307,13 +317,13 @@ func TestAccFsCpFileToFileWithOverwriteFlag(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/overwritten.txt", strings.NewReader("this should be overwritten"), filer.CreateParentDirectories) require.NoError(t, err) - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/overwritten.txt"), "--overwrite") + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/overwritten.txt"), "--overwrite") assertFileContent(t, context.Background(), targetFiler, "a/b/c/overwritten.txt", "hello, world\n") }) } } -func TestAccFsCpFileToDirWithOverwriteFlag(t *testing.T) { +func TestFsCpFileToDirWithOverwriteFlag(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ 
-322,6 +332,7 @@ func TestAccFsCpFileToDirWithOverwriteFlag(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), sourceFiler) @@ -330,13 +341,13 @@ func TestAccFsCpFileToDirWithOverwriteFlag(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should be overwritten"), filer.CreateParentDirectories) require.NoError(t, err) - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c"), "--overwrite") + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c"), "--overwrite") assertFileContent(t, context.Background(), targetFiler, "a/b/c/hello.txt", "hello, world\n") }) } } -func TestAccFsCpErrorsWhenSourceIsDirWithoutRecursiveFlag(t *testing.T) { +func TestFsCpErrorsWhenSourceIsDirWithoutRecursiveFlag(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -345,23 +356,23 @@ func TestAccFsCpErrorsWhenSourceIsDirWithoutRecursiveFlag(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() _, tmpDir := tc.setupFiler(t) - _, _, err := RequireErrorRun(t, "fs", "cp", path.Join(tmpDir), path.Join(tmpDir, "foobar")) + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "cp", path.Join(tmpDir), path.Join(tmpDir, "foobar")) r := regexp.MustCompile("source path .* is a directory. Please specify the --recursive flag") assert.Regexp(t, r, err.Error()) }) } } -func TestAccFsCpErrorsOnInvalidScheme(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - _, _, err := RequireErrorRun(t, "fs", "cp", "dbfs:/a", "https:/b") +func TestFsCpErrorsOnInvalidScheme(t *testing.T) { + ctx := context.Background() + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "cp", "dbfs:/a", "https:/b") assert.Equal(t, "invalid scheme: https", err.Error()) } -func TestAccFsCpSourceIsDirectoryButTargetIsFile(t *testing.T) { +func TestFsCpSourceIsDirectoryButTargetIsFile(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -370,6 +381,7 @@ func TestAccFsCpSourceIsDirectoryButTargetIsFile(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), sourceFiler) @@ -378,7 +390,7 @@ func TestAccFsCpSourceIsDirectoryButTargetIsFile(t *testing.T) { err := targetFiler.Write(context.Background(), "my_target", strings.NewReader("I'll block any attempts to recursively copy"), filer.CreateParentDirectories) require.NoError(t, err) - _, _, err = RequireErrorRun(t, "fs", "cp", sourceDir, path.Join(targetDir, "my_target"), "--recursive") + _, _, err = testcli.RequireErrorRun(t, ctx, "fs", "cp", sourceDir, path.Join(targetDir, "my_target"), "--recursive") assert.Error(t, err) }) } diff --git a/integration/cmd/fs/helpers_test.go b/integration/cmd/fs/helpers_test.go new file mode 100644 index 000000000..e1bebb28f --- /dev/null +++ b/integration/cmd/fs/helpers_test.go @@ -0,0 +1,44 @@ +package fs_test + +import ( + "os" + "path" + "path/filepath" + + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" + + "github.com/databricks/cli/libs/filer" + "github.com/stretchr/testify/require" +) + +func setupLocalFiler(t testutil.TestingT) (filer.Filer, 
string) { + tmp := t.TempDir() + f, err := filer.NewLocalClient(tmp) + require.NoError(t, err) + + return f, path.Join(filepath.ToSlash(tmp)) +} + +func setupDbfsFiler(t testutil.TestingT) (filer.Filer, string) { + _, wt := acc.WorkspaceTest(t) + + tmpdir := acc.TemporaryDbfsDir(wt) + f, err := filer.NewDbfsClient(wt.W, tmpdir) + require.NoError(t, err) + return f, path.Join("dbfs:/", tmpdir) +} + +func setupUcVolumesFiler(t testutil.TestingT) (filer.Filer, string) { + _, wt := acc.WorkspaceTest(t) + + if os.Getenv("TEST_METASTORE_ID") == "" { + t.Skip("Skipping tests that require a UC Volume when metastore id is not set.") + } + + tmpdir := acc.TemporaryVolume(wt) + f, err := filer.NewFilesClient(wt.W, tmpdir) + require.NoError(t, err) + + return f, path.Join("dbfs:/", tmpdir) +} diff --git a/internal/fs_ls_test.go b/integration/cmd/fs/ls_test.go similarity index 73% rename from internal/fs_ls_test.go rename to integration/cmd/fs/ls_test.go index 994a4a425..58e776d8a 100644 --- a/internal/fs_ls_test.go +++ b/integration/cmd/fs/ls_test.go @@ -1,4 +1,4 @@ -package internal +package fs_test import ( "context" @@ -10,6 +10,8 @@ import ( "testing" _ "github.com/databricks/cli/cmd/fs" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -17,7 +19,7 @@ import ( type fsTest struct { name string - setupFiler func(t *testing.T) (filer.Filer, string) + setupFiler func(t testutil.TestingT) (filer.Filer, string) } var fsTests = []fsTest{ @@ -38,7 +40,7 @@ func setupLsFiles(t *testing.T, f filer.Filer) { require.NoError(t, err) } -func TestAccFsLs(t *testing.T) { +func TestFsLs(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -47,10 +49,11 @@ func TestAccFsLs(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := tc.setupFiler(t) setupLsFiles(t, f) - stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", tmpDir, "--output=json") + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "ls", tmpDir, "--output=json") assert.Equal(t, "", stderr.String()) var parsedStdout []map[string]any @@ -71,7 +74,7 @@ func TestAccFsLs(t *testing.T) { } } -func TestAccFsLsWithAbsolutePaths(t *testing.T) { +func TestFsLsWithAbsolutePaths(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -80,10 +83,11 @@ func TestAccFsLsWithAbsolutePaths(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := tc.setupFiler(t) setupLsFiles(t, f) - stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", tmpDir, "--output=json", "--absolute") + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "ls", tmpDir, "--output=json", "--absolute") assert.Equal(t, "", stderr.String()) var parsedStdout []map[string]any @@ -104,7 +108,7 @@ func TestAccFsLsWithAbsolutePaths(t *testing.T) { } } -func TestAccFsLsOnFile(t *testing.T) { +func TestFsLsOnFile(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -112,17 +116,19 @@ func TestAccFsLsOnFile(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + + ctx := context.Background() f, tmpDir := tc.setupFiler(t) setupLsFiles(t, f) - _, _, err := RequireErrorRun(t, "fs", "ls", path.Join(tmpDir, "a", "hello.txt"), "--output=json") + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "ls", path.Join(tmpDir, "a", "hello.txt"), "--output=json") assert.Regexp(t, 
regexp.MustCompile("not a directory: .*/a/hello.txt"), err.Error()) assert.ErrorAs(t, err, &filer.NotADirectory{}) }) } } -func TestAccFsLsOnEmptyDir(t *testing.T) { +func TestFsLsOnEmptyDir(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -131,9 +137,10 @@ func TestAccFsLsOnEmptyDir(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() _, tmpDir := tc.setupFiler(t) - stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", tmpDir, "--output=json") + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "ls", tmpDir, "--output=json") assert.Equal(t, "", stderr.String()) var parsedStdout []map[string]any err := json.Unmarshal(stdout.Bytes(), &parsedStdout) @@ -145,7 +152,7 @@ func TestAccFsLsOnEmptyDir(t *testing.T) { } } -func TestAccFsLsForNonexistingDir(t *testing.T) { +func TestFsLsForNonexistingDir(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -154,20 +161,20 @@ func TestAccFsLsForNonexistingDir(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() _, tmpDir := tc.setupFiler(t) - _, _, err := RequireErrorRun(t, "fs", "ls", path.Join(tmpDir, "nonexistent"), "--output=json") + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "ls", path.Join(tmpDir, "nonexistent"), "--output=json") assert.ErrorIs(t, err, fs.ErrNotExist) assert.Regexp(t, regexp.MustCompile("no such directory: .*/nonexistent"), err.Error()) }) } } -func TestAccFsLsWithoutScheme(t *testing.T) { +func TestFsLsWithoutScheme(t *testing.T) { t.Parallel() - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - _, _, err := RequireErrorRun(t, "fs", "ls", "/path-without-a-dbfs-scheme", "--output=json") + ctx := context.Background() + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "ls", "/path-without-a-dbfs-scheme", "--output=json") assert.ErrorIs(t, err, fs.ErrNotExist) } diff --git a/integration/cmd/fs/main_test.go b/integration/cmd/fs/main_test.go new file mode 100644 index 000000000..b9402f0b2 --- /dev/null +++ b/integration/cmd/fs/main_test.go @@ -0,0 +1,13 @@ +package fs_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. 
+func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/fs_mkdir_test.go b/integration/cmd/fs/mkdir_test.go similarity index 75% rename from internal/fs_mkdir_test.go rename to integration/cmd/fs/mkdir_test.go index 9191f6143..f332bb526 100644 --- a/internal/fs_mkdir_test.go +++ b/integration/cmd/fs/mkdir_test.go @@ -1,4 +1,4 @@ -package internal +package fs_test import ( "context" @@ -7,12 +7,13 @@ import ( "strings" "testing" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestAccFsMkdir(t *testing.T) { +func TestFsMkdir(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -21,10 +22,11 @@ func TestAccFsMkdir(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := tc.setupFiler(t) // create directory "a" - stdout, stderr := RequireSuccessfulRun(t, "fs", "mkdir", path.Join(tmpDir, "a")) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "mkdir", path.Join(tmpDir, "a")) assert.Equal(t, "", stderr.String()) assert.Equal(t, "", stdout.String()) @@ -37,7 +39,7 @@ func TestAccFsMkdir(t *testing.T) { } } -func TestAccFsMkdirCreatesIntermediateDirectories(t *testing.T) { +func TestFsMkdirCreatesIntermediateDirectories(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -46,10 +48,11 @@ func TestAccFsMkdirCreatesIntermediateDirectories(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := tc.setupFiler(t) // create directory "a/b/c" - stdout, stderr := RequireSuccessfulRun(t, "fs", "mkdir", path.Join(tmpDir, "a", "b", "c")) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "mkdir", path.Join(tmpDir, "a", "b", "c")) assert.Equal(t, "", stderr.String()) assert.Equal(t, "", stdout.String()) @@ -74,7 +77,7 @@ func TestAccFsMkdirCreatesIntermediateDirectories(t *testing.T) { } } -func TestAccFsMkdirWhenDirectoryAlreadyExists(t *testing.T) { +func TestFsMkdirWhenDirectoryAlreadyExists(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -83,6 +86,7 @@ func TestAccFsMkdirWhenDirectoryAlreadyExists(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := tc.setupFiler(t) // create directory "a" @@ -90,19 +94,20 @@ func TestAccFsMkdirWhenDirectoryAlreadyExists(t *testing.T) { require.NoError(t, err) // assert run is successful without any errors - stdout, stderr := RequireSuccessfulRun(t, "fs", "mkdir", path.Join(tmpDir, "a")) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "mkdir", path.Join(tmpDir, "a")) assert.Equal(t, "", stderr.String()) assert.Equal(t, "", stdout.String()) }) } } -func TestAccFsMkdirWhenFileExistsAtPath(t *testing.T) { +func TestFsMkdirWhenFileExistsAtPath(t *testing.T) { t.Parallel() t.Run("dbfs", func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := setupDbfsFiler(t) // create file "hello" @@ -110,7 +115,7 @@ func TestAccFsMkdirWhenFileExistsAtPath(t *testing.T) { require.NoError(t, err) // assert mkdir fails - _, _, err = RequireErrorRun(t, "fs", "mkdir", path.Join(tmpDir, "hello")) + _, _, err = testcli.RequireErrorRun(t, ctx, "fs", "mkdir", path.Join(tmpDir, "hello")) // Different cloud providers or cloud configurations return different errors. 
regex := regexp.MustCompile(`(^|: )Path is a file: .*$|(^|: )Cannot create directory .* because .* is an existing file\.$|(^|: )mkdirs\(hadoopPath: .*, permission: rwxrwxrwx\): failed$|(^|: )"The specified path already exists.".*$`) @@ -120,6 +125,7 @@ func TestAccFsMkdirWhenFileExistsAtPath(t *testing.T) { t.Run("uc-volumes", func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := setupUcVolumesFiler(t) // create file "hello" @@ -127,7 +133,7 @@ func TestAccFsMkdirWhenFileExistsAtPath(t *testing.T) { require.NoError(t, err) // assert mkdir fails - _, _, err = RequireErrorRun(t, "fs", "mkdir", path.Join(tmpDir, "hello")) + _, _, err = testcli.RequireErrorRun(t, ctx, "fs", "mkdir", path.Join(tmpDir, "hello")) assert.ErrorAs(t, err, &filer.FileAlreadyExistsError{}) }) diff --git a/internal/fs_rm_test.go b/integration/cmd/fs/rm_test.go similarity index 77% rename from internal/fs_rm_test.go rename to integration/cmd/fs/rm_test.go index e86f5713b..018c7920e 100644 --- a/internal/fs_rm_test.go +++ b/integration/cmd/fs/rm_test.go @@ -1,4 +1,4 @@ -package internal +package fs_test import ( "context" @@ -7,12 +7,13 @@ import ( "strings" "testing" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestAccFsRmFile(t *testing.T) { +func TestFsRmFile(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -22,6 +23,7 @@ func TestAccFsRmFile(t *testing.T) { t.Parallel() // Create a file + ctx := context.Background() f, tmpDir := tc.setupFiler(t) err := f.Write(context.Background(), "hello.txt", strings.NewReader("abcd"), filer.CreateParentDirectories) require.NoError(t, err) @@ -31,7 +33,7 @@ func TestAccFsRmFile(t *testing.T) { assert.NoError(t, err) // Run rm command - stdout, stderr := RequireSuccessfulRun(t, "fs", "rm", path.Join(tmpDir, "hello.txt")) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "rm", path.Join(tmpDir, "hello.txt")) assert.Equal(t, "", stderr.String()) assert.Equal(t, "", stdout.String()) @@ -42,7 +44,7 @@ func TestAccFsRmFile(t *testing.T) { } } -func TestAccFsRmEmptyDir(t *testing.T) { +func TestFsRmEmptyDir(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -52,6 +54,7 @@ func TestAccFsRmEmptyDir(t *testing.T) { t.Parallel() // Create a directory + ctx := context.Background() f, tmpDir := tc.setupFiler(t) err := f.Mkdir(context.Background(), "a") require.NoError(t, err) @@ -61,7 +64,7 @@ func TestAccFsRmEmptyDir(t *testing.T) { assert.NoError(t, err) // Run rm command - stdout, stderr := RequireSuccessfulRun(t, "fs", "rm", path.Join(tmpDir, "a")) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "rm", path.Join(tmpDir, "a")) assert.Equal(t, "", stderr.String()) assert.Equal(t, "", stdout.String()) @@ -72,7 +75,7 @@ func TestAccFsRmEmptyDir(t *testing.T) { } } -func TestAccFsRmNonEmptyDirectory(t *testing.T) { +func TestFsRmNonEmptyDirectory(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -82,6 +85,7 @@ func TestAccFsRmNonEmptyDirectory(t *testing.T) { t.Parallel() // Create a directory + ctx := context.Background() f, tmpDir := tc.setupFiler(t) err := f.Mkdir(context.Background(), "a") require.NoError(t, err) @@ -95,14 +99,14 @@ func TestAccFsRmNonEmptyDirectory(t *testing.T) { assert.NoError(t, err) // Run rm command - _, _, err = RequireErrorRun(t, "fs", "rm", path.Join(tmpDir, "a")) + _, _, err = testcli.RequireErrorRun(t, ctx, "fs", "rm", path.Join(tmpDir, "a")) 
assert.ErrorIs(t, err, fs.ErrInvalid) assert.ErrorAs(t, err, &filer.DirectoryNotEmptyError{}) }) } } -func TestAccFsRmForNonExistentFile(t *testing.T) { +func TestFsRmForNonExistentFile(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -111,17 +115,17 @@ func TestAccFsRmForNonExistentFile(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() _, tmpDir := tc.setupFiler(t) // Expect error if file does not exist - _, _, err := RequireErrorRun(t, "fs", "rm", path.Join(tmpDir, "does-not-exist")) + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "rm", path.Join(tmpDir, "does-not-exist")) assert.ErrorIs(t, err, fs.ErrNotExist) }) } - } -func TestAccFsRmDirRecursively(t *testing.T) { +func TestFsRmDirRecursively(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -130,6 +134,7 @@ func TestAccFsRmDirRecursively(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := tc.setupFiler(t) // Create a directory @@ -145,7 +150,7 @@ func TestAccFsRmDirRecursively(t *testing.T) { assert.NoError(t, err) // Run rm command - stdout, stderr := RequireSuccessfulRun(t, "fs", "rm", path.Join(tmpDir, "a"), "--recursive") + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "rm", path.Join(tmpDir, "a"), "--recursive") assert.Equal(t, "", stderr.String()) assert.Equal(t, "", stdout.String()) diff --git a/integration/cmd/jobs/jobs_test.go b/integration/cmd/jobs/jobs_test.go new file mode 100644 index 000000000..b6bcfc5b3 --- /dev/null +++ b/integration/cmd/jobs/jobs_test.go @@ -0,0 +1,24 @@ +package jobs_test + +import ( + "context" + "encoding/json" + "fmt" + "testing" + + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCreateJob(t *testing.T) { + testutil.Require(t, testutil.Azure) + ctx := context.Background() + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "jobs", "create", "--json", "@testdata/create_job_without_workers.json", "--log-level=debug") + assert.Empty(t, stderr.String()) + var output map[string]int + err := json.Unmarshal(stdout.Bytes(), &output) + require.NoError(t, err) + testcli.RequireSuccessfulRun(t, ctx, "jobs", "delete", fmt.Sprint(output["job_id"]), "--log-level=debug") +} diff --git a/integration/cmd/jobs/main_test.go b/integration/cmd/jobs/main_test.go new file mode 100644 index 000000000..46369a526 --- /dev/null +++ b/integration/cmd/jobs/main_test.go @@ -0,0 +1,13 @@ +package jobs_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. +func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/testjsons/create_job_without_workers.json b/integration/cmd/jobs/testdata/create_job_without_workers.json similarity index 100% rename from internal/testjsons/create_job_without_workers.json rename to integration/cmd/jobs/testdata/create_job_without_workers.json diff --git a/integration/cmd/main_test.go b/integration/cmd/main_test.go new file mode 100644 index 000000000..a1a5586b6 --- /dev/null +++ b/integration/cmd/main_test.go @@ -0,0 +1,13 @@ +package cmd_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. 
+// See [internal.Main] for prerequisites for running integration tests. +func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/integration/cmd/repos/main_test.go b/integration/cmd/repos/main_test.go new file mode 100644 index 000000000..7eaa174bc --- /dev/null +++ b/integration/cmd/repos/main_test.go @@ -0,0 +1,13 @@ +package repos_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. +func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/repos_test.go b/integration/cmd/repos/repos_test.go similarity index 55% rename from internal/repos_test.go rename to integration/cmd/repos/repos_test.go index 1ad0e8775..7526a14ca 100644 --- a/internal/repos_test.go +++ b/integration/cmd/repos/repos_test.go @@ -1,4 +1,4 @@ -package internal +package repos_test import ( "context" @@ -6,6 +6,9 @@ import ( "strconv" "testing" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/service/workspace" @@ -13,10 +16,12 @@ import ( "github.com/stretchr/testify/require" ) +const repoUrl = "https://github.com/databricks/databricks-empty-ide-project.git" + func synthesizeTemporaryRepoPath(t *testing.T, w *databricks.WorkspaceClient, ctx context.Context) string { me, err := w.CurrentUser.Me(ctx) require.NoError(t, err) - repoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, RandomName("empty-repo-integration-")) + repoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, testutil.RandomName("empty-repo-integration-")) // Cleanup if repo was created at specified path. t.Cleanup(func() { @@ -43,15 +48,12 @@ func createTemporaryRepo(t *testing.T, w *databricks.WorkspaceClient, ctx contex return repoInfo.Id, repoPath } -func TestAccReposCreateWithProvider(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) +func TestReposCreateWithProvider(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W repoPath := synthesizeTemporaryRepoPath(t, w, ctx) - _, stderr := RequireSuccessfulRun(t, "repos", "create", repoUrl, "gitHub", "--path", repoPath) + _, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "create", repoUrl, "gitHub", "--path", repoPath) assert.Equal(t, "", stderr.String()) // Confirm the repo was created. @@ -60,15 +62,12 @@ func TestAccReposCreateWithProvider(t *testing.T) { assert.Equal(t, workspace.ObjectTypeRepo, oi.ObjectType) } -func TestAccReposCreateWithoutProvider(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) +func TestReposCreateWithoutProvider(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W repoPath := synthesizeTemporaryRepoPath(t, w, ctx) - _, stderr := RequireSuccessfulRun(t, "repos", "create", repoUrl, "--path", repoPath) + _, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "create", repoUrl, "--path", repoPath) assert.Equal(t, "", stderr.String()) // Confirm the repo was created. 
@@ -77,90 +76,78 @@ func TestAccReposCreateWithoutProvider(t *testing.T) { assert.Equal(t, workspace.ObjectTypeRepo, oi.ObjectType) } -func TestAccReposGet(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) +func TestReposGet(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W repoId, repoPath := createTemporaryRepo(t, w, ctx) // Get by ID - byIdOutput, stderr := RequireSuccessfulRun(t, "repos", "get", strconv.FormatInt(repoId, 10), "--output=json") + byIdOutput, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "get", strconv.FormatInt(repoId, 10), "--output=json") assert.Equal(t, "", stderr.String()) // Get by path - byPathOutput, stderr := RequireSuccessfulRun(t, "repos", "get", repoPath, "--output=json") + byPathOutput, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "get", repoPath, "--output=json") assert.Equal(t, "", stderr.String()) // Output should be the same assert.Equal(t, byIdOutput.String(), byPathOutput.String()) // Get by path fails - _, stderr, err = RequireErrorRun(t, "repos", "get", repoPath+"-doesntexist", "--output=json") + _, stderr, err := testcli.RequireErrorRun(t, ctx, "repos", "get", repoPath+"-doesntexist", "--output=json") assert.ErrorContains(t, err, "failed to look up repo") // Get by path resolves to something other than a repo - _, stderr, err = RequireErrorRun(t, "repos", "get", "/Repos", "--output=json") + _, stderr, err = testcli.RequireErrorRun(t, ctx, "repos", "get", "/Repos", "--output=json") assert.ErrorContains(t, err, "is not a repo") } -func TestAccReposUpdate(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) +func TestReposUpdate(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W repoId, repoPath := createTemporaryRepo(t, w, ctx) // Update by ID - byIdOutput, stderr := RequireSuccessfulRun(t, "repos", "update", strconv.FormatInt(repoId, 10), "--branch", "ide") + byIdOutput, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "update", strconv.FormatInt(repoId, 10), "--branch", "ide") assert.Equal(t, "", stderr.String()) // Update by path - byPathOutput, stderr := RequireSuccessfulRun(t, "repos", "update", repoPath, "--branch", "ide") + byPathOutput, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "update", repoPath, "--branch", "ide") assert.Equal(t, "", stderr.String()) // Output should be the same assert.Equal(t, byIdOutput.String(), byPathOutput.String()) } -func TestAccReposDeleteByID(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) +func TestReposDeleteByID(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W repoId, _ := createTemporaryRepo(t, w, ctx) // Delete by ID - stdout, stderr := RequireSuccessfulRun(t, "repos", "delete", strconv.FormatInt(repoId, 10)) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "delete", strconv.FormatInt(repoId, 10)) assert.Equal(t, "", stdout.String()) assert.Equal(t, "", stderr.String()) // Check it was actually deleted - _, err = w.Repos.GetByRepoId(ctx, repoId) + _, err := w.Repos.GetByRepoId(ctx, repoId) assert.True(t, apierr.IsMissing(err), err) } -func TestAccReposDeleteByPath(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() 
- require.NoError(t, err) +func TestReposDeleteByPath(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W repoId, repoPath := createTemporaryRepo(t, w, ctx) // Delete by path - stdout, stderr := RequireSuccessfulRun(t, "repos", "delete", repoPath) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "delete", repoPath) assert.Equal(t, "", stdout.String()) assert.Equal(t, "", stderr.String()) // Check it was actually deleted - _, err = w.Repos.GetByRepoId(ctx, repoId) + _, err := w.Repos.GetByRepoId(ctx, repoId) assert.True(t, apierr.IsMissing(err), err) } diff --git a/integration/cmd/secrets/main_test.go b/integration/cmd/secrets/main_test.go new file mode 100644 index 000000000..a44d30671 --- /dev/null +++ b/integration/cmd/secrets/main_test.go @@ -0,0 +1,13 @@ +package secrets_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. +func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/secrets_test.go b/integration/cmd/secrets/secrets_test.go similarity index 76% rename from internal/secrets_test.go rename to integration/cmd/secrets/secrets_test.go index 59e5d6150..43ad54de2 100644 --- a/internal/secrets_test.go +++ b/integration/cmd/secrets/secrets_test.go @@ -1,4 +1,4 @@ -package internal +package secrets_test import ( "context" @@ -6,19 +6,22 @@ import ( "fmt" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestSecretsCreateScopeErrWhenNoArguments(t *testing.T) { - _, _, err := RequireErrorRun(t, "secrets", "create-scope") + ctx := context.Background() + _, _, err := testcli.RequireErrorRun(t, ctx, "secrets", "create-scope") assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } func temporarySecretScope(ctx context.Context, t *acc.WorkspaceT) string { - scope := acc.RandomName("cli-acc-") + scope := testutil.RandomName("cli-acc-") err := t.W.Secrets.CreateScope(ctx, workspace.CreateScope{ Scope: scope, }) @@ -61,13 +64,13 @@ func assertSecretBytesValue(t *acc.WorkspaceT, scope, key string, expected []byt assert.Equal(t, expected, decoded) } -func TestAccSecretsPutSecretStringValue(tt *testing.T) { +func TestSecretsPutSecretStringValue(tt *testing.T) { ctx, t := acc.WorkspaceTest(tt) scope := temporarySecretScope(ctx, t) key := "test-key" value := "test-value\nwith-newlines\n" - stdout, stderr := RequireSuccessfulRun(t.T, "secrets", "put-secret", scope, key, "--string-value", value) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "secrets", "put-secret", scope, key, "--string-value", value) assert.Empty(t, stdout) assert.Empty(t, stderr) @@ -75,13 +78,13 @@ func TestAccSecretsPutSecretStringValue(tt *testing.T) { assertSecretBytesValue(t, scope, key, []byte(value)) } -func TestAccSecretsPutSecretBytesValue(tt *testing.T) { +func TestSecretsPutSecretBytesValue(tt *testing.T) { ctx, t := acc.WorkspaceTest(tt) scope := temporarySecretScope(ctx, t) key := "test-key" value := []byte{0x00, 0x01, 0x02, 0x03} - stdout, stderr := RequireSuccessfulRun(t.T, "secrets", "put-secret", scope, key, "--bytes-value", string(value)) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, 
"secrets", "put-secret", scope, key, "--bytes-value", string(value)) assert.Empty(t, stdout) assert.Empty(t, stderr) diff --git a/integration/cmd/storage_credentials/main_test.go b/integration/cmd/storage_credentials/main_test.go new file mode 100644 index 000000000..14d00d966 --- /dev/null +++ b/integration/cmd/storage_credentials/main_test.go @@ -0,0 +1,13 @@ +package storage_credentials_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. +func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/integration/cmd/storage_credentials/storage_credentials_test.go b/integration/cmd/storage_credentials/storage_credentials_test.go new file mode 100644 index 000000000..e4b861312 --- /dev/null +++ b/integration/cmd/storage_credentials/storage_credentials_test.go @@ -0,0 +1,21 @@ +package storage_credentials_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" + "github.com/stretchr/testify/assert" +) + +func TestStorageCredentialsListRendersResponse(t *testing.T) { + ctx, _ := acc.WorkspaceTest(t) + + // Check if metastore is assigned for the workspace, otherwise test will fail + t.Log(testutil.GetEnvOrSkipTest(t, "TEST_METASTORE_ID")) + + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "storage-credentials", "list") + assert.NotEmpty(t, stdout) + assert.Empty(t, stderr) +} diff --git a/integration/cmd/sync/main_test.go b/integration/cmd/sync/main_test.go new file mode 100644 index 000000000..8d9f3ca25 --- /dev/null +++ b/integration/cmd/sync/main_test.go @@ -0,0 +1,13 @@ +package sync_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. 
+func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/sync_test.go b/integration/cmd/sync/sync_test.go similarity index 86% rename from internal/sync_test.go rename to integration/cmd/sync/sync_test.go index 6f8b1827b..984f6ac49 100644 --- a/internal/sync_test.go +++ b/integration/cmd/sync/sync_test.go @@ -1,4 +1,4 @@ -package internal +package sync_test import ( "context" @@ -15,7 +15,9 @@ import ( "testing" "time" - _ "github.com/databricks/cli/cmd/sync" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/sync" "github.com/databricks/cli/libs/testfile" @@ -36,7 +38,7 @@ var ( func setupRepo(t *testing.T, wsc *databricks.WorkspaceClient, ctx context.Context) (localRoot, remoteRoot string) { me, err := wsc.CurrentUser.Me(ctx) require.NoError(t, err) - repoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, RandomName("empty-repo-sync-integration-")) + repoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, testutil.RandomName("empty-repo-sync-integration-")) repoInfo, err := wsc.Repos.Create(ctx, workspace.CreateRepoRequest{ Path: repoPath, @@ -63,19 +65,19 @@ func setupRepo(t *testing.T, wsc *databricks.WorkspaceClient, ctx context.Contex type syncTest struct { t *testing.T - c *cobraTestRunner + c *testcli.Runner w *databricks.WorkspaceClient f filer.Filer localRoot string remoteRoot string } -func setupSyncTest(t *testing.T, args ...string) *syncTest { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func setupSyncTest(t *testing.T, args ...string) (context.Context, *syncTest) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W - w := databricks.Must(databricks.NewWorkspaceClient()) localRoot := t.TempDir() - remoteRoot := TemporaryWorkspaceDir(t, w) + remoteRoot := acc.TemporaryWorkspaceDir(wt, "sync-") f, err := filer.NewWorkspaceFilesClient(w, remoteRoot) require.NoError(t, err) @@ -88,10 +90,10 @@ func setupSyncTest(t *testing.T, args ...string) *syncTest { "json", }, args...) - c := NewCobraTestRunner(t, args...) + c := testcli.NewRunner(t, ctx, args...) c.RunBackground() - return &syncTest{ + return ctx, &syncTest{ t: t, c: c, w: w, @@ -109,7 +111,7 @@ func (s *syncTest) waitForCompletionMarker() { select { case <-ctx.Done(): s.t.Fatal("timed out waiting for sync to complete") - case line := <-s.c.stdoutLines: + case line := <-s.c.StdoutLines: var event sync.EventBase err := json.Unmarshal([]byte(line), &event) require.NoError(s.t, err) @@ -145,7 +147,7 @@ func (a *syncTest) remoteDirContent(ctx context.Context, relativeDir string, exp } } -func (a *syncTest) remoteFileContent(ctx context.Context, relativePath string, expectedContent string) { +func (a *syncTest) remoteFileContent(ctx context.Context, relativePath, expectedContent string) { filePath := path.Join(a.remoteRoot, relativePath) // Remove leading "/" so we can use it in the URL. 
@@ -181,7 +183,7 @@ func (a *syncTest) touchFile(ctx context.Context, path string) { require.NoError(a.t, err) } -func (a *syncTest) objectType(ctx context.Context, relativePath string, expected string) { +func (a *syncTest) objectType(ctx context.Context, relativePath, expected string) { path := path.Join(a.remoteRoot, relativePath) a.c.Eventually(func() bool { @@ -193,7 +195,7 @@ func (a *syncTest) objectType(ctx context.Context, relativePath string, expected }, 30*time.Second, 5*time.Second) } -func (a *syncTest) language(ctx context.Context, relativePath string, expected string) { +func (a *syncTest) language(ctx context.Context, relativePath, expected string) { path := path.Join(a.remoteRoot, relativePath) a.c.Eventually(func() bool { @@ -228,9 +230,8 @@ func (a *syncTest) snapshotContains(files []string) { assert.Equal(a.t, len(files), len(s.LastModifiedTimes)) } -func TestAccSyncFullFileSync(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--full", "--watch") +func TestSyncFullFileSync(t *testing.T) { + ctx, assertSync := setupSyncTest(t, "--full", "--watch") // .gitignore is created by the sync process to enforce .databricks is not synced assertSync.waitForCompletionMarker() @@ -260,9 +261,8 @@ func TestAccSyncFullFileSync(t *testing.T) { assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore")) } -func TestAccSyncIncrementalFileSync(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") +func TestSyncIncrementalFileSync(t *testing.T) { + ctx, assertSync := setupSyncTest(t, "--watch") // .gitignore is created by the sync process to enforce .databricks is not synced assertSync.waitForCompletionMarker() @@ -294,9 +294,8 @@ func TestAccSyncIncrementalFileSync(t *testing.T) { assertSync.snapshotContains(append(repoFiles, ".gitignore")) } -func TestAccSyncNestedFolderSync(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") +func TestSyncNestedFolderSync(t *testing.T) { + ctx, assertSync := setupSyncTest(t, "--watch") // .gitignore is created by the sync process to enforce .databricks is not synced assertSync.waitForCompletionMarker() @@ -322,9 +321,8 @@ func TestAccSyncNestedFolderSync(t *testing.T) { assertSync.snapshotContains(append(repoFiles, ".gitignore")) } -func TestAccSyncNestedFolderDoesntFailOnNonEmptyDirectory(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") +func TestSyncNestedFolderDoesntFailOnNonEmptyDirectory(t *testing.T) { + ctx, assertSync := setupSyncTest(t, "--watch") // .gitignore is created by the sync process to enforce .databricks is not synced assertSync.waitForCompletionMarker() @@ -355,9 +353,8 @@ func TestAccSyncNestedFolderDoesntFailOnNonEmptyDirectory(t *testing.T) { assertSync.remoteExists(ctx, "dir1") } -func TestAccSyncNestedSpacePlusAndHashAreEscapedSync(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") +func TestSyncNestedSpacePlusAndHashAreEscapedSync(t *testing.T) { + ctx, assertSync := setupSyncTest(t, "--watch") // .gitignore is created by the sync process to enforce .databricks is not synced assertSync.waitForCompletionMarker() @@ -391,9 +388,8 @@ func TestAccSyncNestedSpacePlusAndHashAreEscapedSync(t *testing.T) { // // In the above scenario sync should delete the empty folder and add foo to the remote // file system -func TestAccSyncIncrementalFileOverwritesFolder(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") 
+func TestSyncIncrementalFileOverwritesFolder(t *testing.T) { + ctx, assertSync := setupSyncTest(t, "--watch") // create foo/bar.txt localFilePath := filepath.Join(assertSync.localRoot, "foo/bar.txt") @@ -421,9 +417,8 @@ func TestAccSyncIncrementalFileOverwritesFolder(t *testing.T) { assertSync.snapshotContains(append(repoFiles, ".gitignore", "foo")) } -func TestAccSyncIncrementalSyncPythonNotebookToFile(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") +func TestSyncIncrementalSyncPythonNotebookToFile(t *testing.T) { + ctx, assertSync := setupSyncTest(t, "--watch") // create python notebook localFilePath := filepath.Join(assertSync.localRoot, "foo.py") @@ -452,9 +447,8 @@ func TestAccSyncIncrementalSyncPythonNotebookToFile(t *testing.T) { assertSync.snapshotContains(append(repoFiles, ".gitignore")) } -func TestAccSyncIncrementalSyncFileToPythonNotebook(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") +func TestSyncIncrementalSyncFileToPythonNotebook(t *testing.T) { + ctx, assertSync := setupSyncTest(t, "--watch") // create vanilla python file localFilePath := filepath.Join(assertSync.localRoot, "foo.py") @@ -476,9 +470,8 @@ func TestAccSyncIncrementalSyncFileToPythonNotebook(t *testing.T) { assertSync.snapshotContains(append(repoFiles, ".gitignore", "foo.py")) } -func TestAccSyncIncrementalSyncPythonNotebookDelete(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") +func TestSyncIncrementalSyncPythonNotebookDelete(t *testing.T) { + ctx, assertSync := setupSyncTest(t, "--watch") // create python notebook localFilePath := filepath.Join(assertSync.localRoot, "foo.py") @@ -498,17 +491,15 @@ func TestAccSyncIncrementalSyncPythonNotebookDelete(t *testing.T) { assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore")) } -func TestAccSyncEnsureRemotePathIsUsableIfRepoDoesntExist(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - wsc := databricks.Must(databricks.NewWorkspaceClient()) - ctx := context.Background() +func TestSyncEnsureRemotePathIsUsableIfRepoDoesntExist(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + wsc := wt.W me, err := wsc.CurrentUser.Me(ctx) require.NoError(t, err) // Hypothetical repo path doesn't exist. - nonExistingRepoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, RandomName("doesnt-exist-")) + nonExistingRepoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, testutil.RandomName("doesnt-exist-")) err = sync.EnsureRemotePathIsUsable(ctx, wsc, nonExistingRepoPath, nil) assert.ErrorContains(t, err, " does not exist; please create it first") @@ -518,11 +509,10 @@ func TestAccSyncEnsureRemotePathIsUsableIfRepoDoesntExist(t *testing.T) { assert.ErrorContains(t, err, " does not exist; please create it first") } -func TestAccSyncEnsureRemotePathIsUsableIfRepoExists(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestSyncEnsureRemotePathIsUsableIfRepoExists(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + wsc := wt.W - wsc := databricks.Must(databricks.NewWorkspaceClient()) - ctx := context.Background() _, remoteRepoPath := setupRepo(t, wsc, ctx) // Repo itself is usable. 
@@ -540,15 +530,14 @@ func TestAccSyncEnsureRemotePathIsUsableIfRepoExists(t *testing.T) { require.Equal(t, workspace.ObjectTypeDirectory, info.ObjectType) } -func TestAccSyncEnsureRemotePathIsUsableInWorkspace(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestSyncEnsureRemotePathIsUsableInWorkspace(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + wsc := wt.W - wsc := databricks.Must(databricks.NewWorkspaceClient()) - ctx := context.Background() me, err := wsc.CurrentUser.Me(ctx) require.NoError(t, err) - remotePath := fmt.Sprintf("/Users/%s/%s", me.UserName, RandomName("ensure-path-exists-test-")) + remotePath := fmt.Sprintf("/Users/%s/%s", me.UserName, testutil.RandomName("ensure-path-exists-test-")) err = sync.EnsureRemotePathIsUsable(ctx, wsc, remotePath, me) assert.NoError(t, err) diff --git a/internal/unknown_command_test.go b/integration/cmd/unknown_command_test.go similarity index 63% rename from internal/unknown_command_test.go rename to integration/cmd/unknown_command_test.go index 62b84027f..fd87a77ff 100644 --- a/internal/unknown_command_test.go +++ b/integration/cmd/unknown_command_test.go @@ -1,13 +1,16 @@ -package internal +package cmd_test import ( + "context" "testing" + "github.com/databricks/cli/internal/testcli" assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestUnknownCommand(t *testing.T) { - stdout, stderr, err := RequireErrorRun(t, "unknown-command") + ctx := context.Background() + stdout, stderr, err := testcli.RequireErrorRun(t, ctx, "unknown-command") assert.Error(t, err, "unknown command", `unknown command "unknown-command" for "databricks"`) assert.Equal(t, "", stdout.String()) diff --git a/integration/cmd/version/main_test.go b/integration/cmd/version/main_test.go new file mode 100644 index 000000000..4aa5e046a --- /dev/null +++ b/integration/cmd/version/main_test.go @@ -0,0 +1,13 @@ +package version_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. 
+func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/version_test.go b/integration/cmd/version/version_test.go similarity index 66% rename from internal/version_test.go rename to integration/cmd/version/version_test.go index 7dba63cd8..b12974d69 100644 --- a/internal/version_test.go +++ b/integration/cmd/version/version_test.go @@ -1,36 +1,42 @@ -package internal +package version_test import ( + "context" "encoding/json" "fmt" "testing" "github.com/databricks/cli/internal/build" + "github.com/databricks/cli/internal/testcli" "github.com/stretchr/testify/assert" ) var expectedVersion = fmt.Sprintf("Databricks CLI v%s\n", build.GetInfo().Version) func TestVersionFlagShort(t *testing.T) { - stdout, stderr := RequireSuccessfulRun(t, "-v") + ctx := context.Background() + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "-v") assert.Equal(t, expectedVersion, stdout.String()) assert.Equal(t, "", stderr.String()) } func TestVersionFlagLong(t *testing.T) { - stdout, stderr := RequireSuccessfulRun(t, "--version") + ctx := context.Background() + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "--version") assert.Equal(t, expectedVersion, stdout.String()) assert.Equal(t, "", stderr.String()) } func TestVersionCommand(t *testing.T) { - stdout, stderr := RequireSuccessfulRun(t, "version") + ctx := context.Background() + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "version") assert.Equal(t, expectedVersion, stdout.String()) assert.Equal(t, "", stderr.String()) } func TestVersionCommandWithJSONOutput(t *testing.T) { - stdout, stderr := RequireSuccessfulRun(t, "version", "--output", "json") + ctx := context.Background() + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "version", "--output", "json") assert.NotEmpty(t, stdout.String()) assert.Equal(t, "", stderr.String()) diff --git a/integration/cmd/workspace/main_test.go b/integration/cmd/workspace/main_test.go new file mode 100644 index 000000000..40d140eac --- /dev/null +++ b/integration/cmd/workspace/main_test.go @@ -0,0 +1,13 @@ +package workspace_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. 
+func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/testdata/import_dir/a/b/c/file-b b/integration/cmd/workspace/testdata/import_dir/a/b/c/file-b similarity index 100% rename from internal/testdata/import_dir/a/b/c/file-b rename to integration/cmd/workspace/testdata/import_dir/a/b/c/file-b diff --git a/internal/testdata/import_dir/file-a b/integration/cmd/workspace/testdata/import_dir/file-a similarity index 100% rename from internal/testdata/import_dir/file-a rename to integration/cmd/workspace/testdata/import_dir/file-a diff --git a/internal/testdata/import_dir/jupyterNotebook.ipynb b/integration/cmd/workspace/testdata/import_dir/jupyterNotebook.ipynb similarity index 100% rename from internal/testdata/import_dir/jupyterNotebook.ipynb rename to integration/cmd/workspace/testdata/import_dir/jupyterNotebook.ipynb diff --git a/internal/testdata/import_dir/pyNotebook.py b/integration/cmd/workspace/testdata/import_dir/pyNotebook.py similarity index 100% rename from internal/testdata/import_dir/pyNotebook.py rename to integration/cmd/workspace/testdata/import_dir/pyNotebook.py diff --git a/internal/testdata/import_dir/rNotebook.r b/integration/cmd/workspace/testdata/import_dir/rNotebook.r similarity index 100% rename from internal/testdata/import_dir/rNotebook.r rename to integration/cmd/workspace/testdata/import_dir/rNotebook.r diff --git a/internal/testdata/import_dir/scalaNotebook.scala b/integration/cmd/workspace/testdata/import_dir/scalaNotebook.scala similarity index 100% rename from internal/testdata/import_dir/scalaNotebook.scala rename to integration/cmd/workspace/testdata/import_dir/scalaNotebook.scala diff --git a/internal/testdata/import_dir/sqlNotebook.sql b/integration/cmd/workspace/testdata/import_dir/sqlNotebook.sql similarity index 100% rename from internal/testdata/import_dir/sqlNotebook.sql rename to integration/cmd/workspace/testdata/import_dir/sqlNotebook.sql diff --git a/internal/workspace_test.go b/integration/cmd/workspace/workspace_test.go similarity index 77% rename from internal/workspace_test.go rename to integration/cmd/workspace/workspace_test.go index 445361654..4edbbfc83 100644 --- a/internal/workspace_test.go +++ b/integration/cmd/workspace/workspace_test.go @@ -1,4 +1,4 @@ -package internal +package workspace_test import ( "context" @@ -11,18 +11,17 @@ import ( "strings" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestAccWorkspaceList(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - stdout, stderr := RequireSuccessfulRun(t, "workspace", "list", "/") +func TestWorkspaceList(t *testing.T) { + ctx := context.Background() + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "workspace", "list", "/") outStr := stdout.String() assert.Contains(t, outStr, "ID") assert.Contains(t, outStr, "Type") @@ -32,21 +31,22 @@ func TestAccWorkspaceList(t *testing.T) { } func TestWorkpaceListErrorWhenNoArguments(t *testing.T) { - _, _, err := RequireErrorRun(t, "workspace", "list") + ctx := context.Background() + _, _, err := testcli.RequireErrorRun(t, ctx, "workspace", "list") assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } func TestWorkpaceGetStatusErrorWhenNoArguments(t *testing.T) { - _, 
_, err := RequireErrorRun(t, "workspace", "get-status") + ctx := context.Background() + _, _, err := testcli.RequireErrorRun(t, ctx, "workspace", "get-status") assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } -func TestAccWorkpaceExportPrintsContents(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestWorkpaceExportPrintsContents(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W - ctx := context.Background() - w := databricks.Must(databricks.NewWorkspaceClient()) - tmpdir := TemporaryWorkspaceDir(t, w) + tmpdir := acc.TemporaryWorkspaceDir(wt, "workspace-export-") f, err := filer.NewWorkspaceFilesClient(w, tmpdir) require.NoError(t, err) @@ -56,29 +56,30 @@ func TestAccWorkpaceExportPrintsContents(t *testing.T) { require.NoError(t, err) // Run export - stdout, stderr := RequireSuccessfulRun(t, "workspace", "export", path.Join(tmpdir, "file-a")) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "workspace", "export", path.Join(tmpdir, "file-a")) assert.Equal(t, contents, stdout.String()) assert.Equal(t, "", stderr.String()) } func setupWorkspaceImportExportTest(t *testing.T) (context.Context, filer.Filer, string) { ctx, wt := acc.WorkspaceTest(t) + w := wt.W - tmpdir := TemporaryWorkspaceDir(t, wt.W) - f, err := filer.NewWorkspaceFilesClient(wt.W, tmpdir) + tmpdir := acc.TemporaryWorkspaceDir(wt, "workspace-import-") + f, err := filer.NewWorkspaceFilesClient(w, tmpdir) require.NoError(t, err) return ctx, f, tmpdir } -func assertLocalFileContents(t *testing.T, path string, content string) { +func assertLocalFileContents(t *testing.T, path, content string) { require.FileExists(t, path) b, err := os.ReadFile(path) require.NoError(t, err) assert.Contains(t, string(b), content) } -func assertFilerFileContents(t *testing.T, ctx context.Context, f filer.Filer, path string, content string) { +func assertFilerFileContents(t *testing.T, ctx context.Context, f filer.Filer, path, content string) { r, err := f.Read(ctx, path) require.NoError(t, err) b, err := io.ReadAll(r) @@ -92,7 +93,7 @@ func assertWorkspaceFileType(t *testing.T, ctx context.Context, f filer.Filer, p assert.Equal(t, fileType, info.Sys().(workspace.ObjectInfo).ObjectType) } -func TestAccExportDir(t *testing.T) { +func TestExportDir(t *testing.T) { ctx, f, sourceDir := setupWorkspaceImportExportTest(t) targetDir := t.TempDir() @@ -124,7 +125,7 @@ func TestAccExportDir(t *testing.T) { }, "\n") // Run Export - stdout, stderr := RequireSuccessfulRun(t, "workspace", "export-dir", sourceDir, targetDir) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "workspace", "export-dir", sourceDir, targetDir) assert.Equal(t, expectedLogs, stdout.String()) assert.Equal(t, "", stderr.String()) @@ -137,7 +138,7 @@ func TestAccExportDir(t *testing.T) { assertLocalFileContents(t, filepath.Join(targetDir, "a/b/c/file-b"), "def") } -func TestAccExportDirDoesNotOverwrite(t *testing.T) { +func TestExportDirDoesNotOverwrite(t *testing.T) { ctx, f, sourceDir := setupWorkspaceImportExportTest(t) targetDir := t.TempDir() @@ -152,13 +153,13 @@ func TestAccExportDirDoesNotOverwrite(t *testing.T) { require.NoError(t, err) // Run Export - RequireSuccessfulRun(t, "workspace", "export-dir", sourceDir, targetDir) + testcli.RequireSuccessfulRun(t, ctx, "workspace", "export-dir", sourceDir, targetDir) // Assert file is not overwritten assertLocalFileContents(t, filepath.Join(targetDir, "file-a"), "local content") } -func TestAccExportDirWithOverwriteFlag(t *testing.T) { +func TestExportDirWithOverwriteFlag(t 
*testing.T) { ctx, f, sourceDir := setupWorkspaceImportExportTest(t) targetDir := t.TempDir() @@ -173,15 +174,15 @@ func TestAccExportDirWithOverwriteFlag(t *testing.T) { require.NoError(t, err) // Run Export - RequireSuccessfulRun(t, "workspace", "export-dir", sourceDir, targetDir, "--overwrite") + testcli.RequireSuccessfulRun(t, ctx, "workspace", "export-dir", sourceDir, targetDir, "--overwrite") // Assert file has been overwritten assertLocalFileContents(t, filepath.Join(targetDir, "file-a"), "content from workspace") } -func TestAccImportDir(t *testing.T) { +func TestImportDir(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) - stdout, stderr := RequireSuccessfulRun(t, "workspace", "import-dir", "./testdata/import_dir", targetDir, "--log-level=debug") + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "workspace", "import-dir", "./testdata/import_dir", targetDir, "--log-level=debug") expectedLogs := strings.Join([]string{ fmt.Sprintf("Importing files from %s", "./testdata/import_dir"), @@ -208,7 +209,7 @@ func TestAccImportDir(t *testing.T) { assertFilerFileContents(t, ctx, workspaceFiler, "jupyterNotebook", "# Databricks notebook source\nprint(\"jupyter\")") } -func TestAccImportDirDoesNotOverwrite(t *testing.T) { +func TestImportDirDoesNotOverwrite(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) var err error @@ -222,7 +223,7 @@ func TestAccImportDirDoesNotOverwrite(t *testing.T) { assertFilerFileContents(t, ctx, workspaceFiler, "file-a", "old file") assertFilerFileContents(t, ctx, workspaceFiler, "pyNotebook", "# Databricks notebook source\nprint(\"old notebook\")") - RequireSuccessfulRun(t, "workspace", "import-dir", "./testdata/import_dir", targetDir) + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import-dir", "./testdata/import_dir", targetDir) // Assert files are imported assertFilerFileContents(t, ctx, workspaceFiler, "a/b/c/file-b", "file-in-dir") @@ -236,7 +237,7 @@ func TestAccImportDirDoesNotOverwrite(t *testing.T) { assertFilerFileContents(t, ctx, workspaceFiler, "pyNotebook", "# Databricks notebook source\nprint(\"old notebook\")") } -func TestAccImportDirWithOverwriteFlag(t *testing.T) { +func TestImportDirWithOverwriteFlag(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) var err error @@ -250,7 +251,7 @@ func TestAccImportDirWithOverwriteFlag(t *testing.T) { assertFilerFileContents(t, ctx, workspaceFiler, "file-a", "old file") assertFilerFileContents(t, ctx, workspaceFiler, "pyNotebook", "# Databricks notebook source\nprint(\"old notebook\")") - RequireSuccessfulRun(t, "workspace", "import-dir", "./testdata/import_dir", targetDir, "--overwrite") + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import-dir", "./testdata/import_dir", targetDir, "--overwrite") // Assert files are imported assertFilerFileContents(t, ctx, workspaceFiler, "a/b/c/file-b", "file-in-dir") @@ -264,7 +265,7 @@ func TestAccImportDirWithOverwriteFlag(t *testing.T) { assertFilerFileContents(t, ctx, workspaceFiler, "pyNotebook", "# Databricks notebook source\nprint(\"python\")") } -func TestAccExport(t *testing.T) { +func TestExport(t *testing.T) { ctx, f, sourceDir := setupWorkspaceImportExportTest(t) var err error @@ -272,7 +273,7 @@ func TestAccExport(t *testing.T) { // Export vanilla file err = f.Write(ctx, "file-a", strings.NewReader("abc")) require.NoError(t, err) - stdout, _ := RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "file-a")) + stdout, _ := 
testcli.RequireSuccessfulRun(t, ctx, "workspace", "export", path.Join(sourceDir, "file-a")) b, err := io.ReadAll(&stdout) require.NoError(t, err) assert.Equal(t, "abc", string(b)) @@ -280,20 +281,20 @@ func TestAccExport(t *testing.T) { // Export python notebook err = f.Write(ctx, "pyNotebook.py", strings.NewReader("# Databricks notebook source")) require.NoError(t, err) - stdout, _ = RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook")) + stdout, _ = testcli.RequireSuccessfulRun(t, ctx, "workspace", "export", path.Join(sourceDir, "pyNotebook")) b, err = io.ReadAll(&stdout) require.NoError(t, err) assert.Equal(t, "# Databricks notebook source\n", string(b)) // Export python notebook as jupyter - stdout, _ = RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--format", "JUPYTER") + stdout, _ = testcli.RequireSuccessfulRun(t, ctx, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--format", "JUPYTER") b, err = io.ReadAll(&stdout) require.NoError(t, err) assert.Contains(t, string(b), `"cells":`, "jupyter notebooks contain the cells field") assert.Contains(t, string(b), `"metadata":`, "jupyter notebooks contain the metadata field") } -func TestAccExportWithFileFlag(t *testing.T) { +func TestExportWithFileFlag(t *testing.T) { ctx, f, sourceDir := setupWorkspaceImportExportTest(t) localTmpDir := t.TempDir() @@ -302,7 +303,7 @@ func TestAccExportWithFileFlag(t *testing.T) { // Export vanilla file err = f.Write(ctx, "file-a", strings.NewReader("abc")) require.NoError(t, err) - stdout, _ := RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "file-a"), "--file", filepath.Join(localTmpDir, "file.txt")) + stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "workspace", "export", path.Join(sourceDir, "file-a"), "--file", filepath.Join(localTmpDir, "file.txt")) b, err := io.ReadAll(&stdout) require.NoError(t, err) // Expect nothing to be printed to stdout @@ -312,14 +313,14 @@ func TestAccExportWithFileFlag(t *testing.T) { // Export python notebook err = f.Write(ctx, "pyNotebook.py", strings.NewReader("# Databricks notebook source")) require.NoError(t, err) - stdout, _ = RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--file", filepath.Join(localTmpDir, "pyNb.py")) + stdout, _ = testcli.RequireSuccessfulRun(t, ctx, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--file", filepath.Join(localTmpDir, "pyNb.py")) b, err = io.ReadAll(&stdout) require.NoError(t, err) assert.Equal(t, "", string(b)) assertLocalFileContents(t, filepath.Join(localTmpDir, "pyNb.py"), "# Databricks notebook source\n") // Export python notebook as jupyter - stdout, _ = RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--format", "JUPYTER", "--file", filepath.Join(localTmpDir, "jupyterNb.ipynb")) + stdout, _ = testcli.RequireSuccessfulRun(t, ctx, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--format", "JUPYTER", "--file", filepath.Join(localTmpDir, "jupyterNb.ipynb")) b, err = io.ReadAll(&stdout) require.NoError(t, err) assert.Equal(t, "", string(b)) @@ -327,75 +328,75 @@ func TestAccExportWithFileFlag(t *testing.T) { assertLocalFileContents(t, filepath.Join(localTmpDir, "jupyterNb.ipynb"), `"metadata":`) } -func TestAccImportFileUsingContentFormatSource(t *testing.T) { +func TestImportFileUsingContentFormatSource(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) // Content = `print(1)`. 
Uploaded as a notebook by default - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "pyScript"), + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "pyScript"), "--content", base64.StdEncoding.EncodeToString([]byte("print(1)")), "--language=PYTHON") assertFilerFileContents(t, ctx, workspaceFiler, "pyScript", "print(1)") assertWorkspaceFileType(t, ctx, workspaceFiler, "pyScript", workspace.ObjectTypeNotebook) // Import with content = `# Databricks notebook source\nprint(1)`. Uploaded as a notebook with the content just being print(1) - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "pyNb"), + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "pyNb"), "--content", base64.StdEncoding.EncodeToString([]byte("`# Databricks notebook source\nprint(1)")), "--language=PYTHON") assertFilerFileContents(t, ctx, workspaceFiler, "pyNb", "print(1)") assertWorkspaceFileType(t, ctx, workspaceFiler, "pyNb", workspace.ObjectTypeNotebook) } -func TestAccImportFileUsingContentFormatAuto(t *testing.T) { +func TestImportFileUsingContentFormatAuto(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) // Content = `# Databricks notebook source\nprint(1)`. Upload as file if path has no extension. - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-file"), + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "py-nb-as-file"), "--content", base64.StdEncoding.EncodeToString([]byte("`# Databricks notebook source\nprint(1)")), "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-file", "# Databricks notebook source\nprint(1)") assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-file", workspace.ObjectTypeFile) // Content = `# Databricks notebook source\nprint(1)`. Upload as notebook if path has py extension - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-notebook.py"), + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "py-nb-as-notebook.py"), "--content", base64.StdEncoding.EncodeToString([]byte("`# Databricks notebook source\nprint(1)")), "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-notebook", "# Databricks notebook source\nprint(1)") assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-notebook", workspace.ObjectTypeNotebook) // Content = `print(1)`. 
Upload as file if content is not notebook (even if path has .py extension) - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "not-a-notebook.py"), "--content", + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "not-a-notebook.py"), "--content", base64.StdEncoding.EncodeToString([]byte("print(1)")), "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "not-a-notebook.py", "print(1)") assertWorkspaceFileType(t, ctx, workspaceFiler, "not-a-notebook.py", workspace.ObjectTypeFile) } -func TestAccImportFileFormatSource(t *testing.T) { +func TestImportFileFormatSource(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "pyNotebook"), "--file", "./testdata/import_dir/pyNotebook.py", "--language=PYTHON") + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "pyNotebook"), "--file", "./testdata/import_dir/pyNotebook.py", "--language=PYTHON") assertFilerFileContents(t, ctx, workspaceFiler, "pyNotebook", "# Databricks notebook source\nprint(\"python\")") assertWorkspaceFileType(t, ctx, workspaceFiler, "pyNotebook", workspace.ObjectTypeNotebook) - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "scalaNotebook"), "--file", "./testdata/import_dir/scalaNotebook.scala", "--language=SCALA") + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "scalaNotebook"), "--file", "./testdata/import_dir/scalaNotebook.scala", "--language=SCALA") assertFilerFileContents(t, ctx, workspaceFiler, "scalaNotebook", "// Databricks notebook source\nprintln(\"scala\")") assertWorkspaceFileType(t, ctx, workspaceFiler, "scalaNotebook", workspace.ObjectTypeNotebook) - _, _, err := RequireErrorRun(t, "workspace", "import", path.Join(targetDir, "scalaNotebook"), "--file", "./testdata/import_dir/scalaNotebook.scala") + _, _, err := testcli.RequireErrorRun(t, ctx, "workspace", "import", path.Join(targetDir, "scalaNotebook"), "--file", "./testdata/import_dir/scalaNotebook.scala") assert.ErrorContains(t, err, "The zip file may not be valid or may be an unsupported version. Hint: Objects imported using format=SOURCE are expected to be zip encoded databricks source notebook(s) by default. 
Please specify a language using the --language flag if you are trying to import a single uncompressed notebook") } -func TestAccImportFileFormatAuto(t *testing.T) { +func TestImportFileFormatAuto(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) // Upload as file if path has no extension - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-file"), "--file", "./testdata/import_dir/pyNotebook.py", "--format=AUTO") + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "py-nb-as-file"), "--file", "./testdata/import_dir/pyNotebook.py", "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-file", "# Databricks notebook source") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-file", "print(\"python\")") assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-file", workspace.ObjectTypeFile) // Upload as notebook if path has extension - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-notebook.py"), "--file", "./testdata/import_dir/pyNotebook.py", "--format=AUTO") + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "py-nb-as-notebook.py"), "--file", "./testdata/import_dir/pyNotebook.py", "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-notebook", "# Databricks notebook source\nprint(\"python\")") assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-notebook", workspace.ObjectTypeNotebook) // Upload as file if content is not notebook (even if path has .py extension) - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "not-a-notebook.py"), "--file", "./testdata/import_dir/file-a", "--format=AUTO") + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "not-a-notebook.py"), "--file", "./testdata/import_dir/file-a", "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "not-a-notebook.py", "hello, world") assertWorkspaceFileType(t, ctx, workspaceFiler, "not-a-notebook.py", workspace.ObjectTypeFile) } diff --git a/integration/enforce_convention_test.go b/integration/enforce_convention_test.go new file mode 100644 index 000000000..cc822a6a3 --- /dev/null +++ b/integration/enforce_convention_test.go @@ -0,0 +1,116 @@ +package integration + +import ( + "go/parser" + "go/token" + "os" + "path/filepath" + "strings" + "testing" + "text/template" + + "golang.org/x/exp/maps" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type packageInfo struct { + Name string + Files []string +} + +func enumeratePackages(t *testing.T) map[string]packageInfo { + pkgmap := make(map[string]packageInfo) + err := filepath.Walk(".", func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Skip files. + if !info.IsDir() { + return nil + } + + // Skip the root directory and the "internal" directory. + if path == "." || strings.HasPrefix(path, "internal") { + return nil + } + + fset := token.NewFileSet() + pkgs, err := parser.ParseDir(fset, path, nil, parser.ParseComments) + require.NoError(t, err) + if len(pkgs) == 0 { + return nil + } + + // Expect one package per directory. + require.Len(t, pkgs, 1, "Directory %s contains more than one package", path) + v := maps.Values(pkgs)[0] + + // Record the package. 
+ pkgmap[path] = packageInfo{ + Name: v.Name, + Files: maps.Keys(v.Files), + } + return nil + }) + require.NoError(t, err) + return pkgmap +} + +// TestEnforcePackageNames checks that all integration test package names use the "_test" suffix. +// We enforce this package name to avoid package name aliasing. +func TestEnforcePackageNames(t *testing.T) { + pkgmap := enumeratePackages(t) + for _, pkg := range pkgmap { + assert.True(t, strings.HasSuffix(pkg.Name, "_test"), "Package name %s does not end with _test", pkg.Name) + } +} + +var mainTestTemplate = template.Must(template.New("main_test").Parse( + `package {{.Name}} + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. +func TestMain(m *testing.M) { + internal.Main(m) +} +`)) + +func TestEnforceMainTest(t *testing.T) { + pkgmap := enumeratePackages(t) + for dir, pkg := range pkgmap { + found := false + for _, file := range pkg.Files { + if filepath.Base(file) == "main_test.go" { + found = true + break + } + } + + // Expect a "main_test.go" file in each package. + assert.True(t, found, "Directory %s does not contain a main_test.go file", dir) + } +} + +func TestWriteMainTest(t *testing.T) { + t.Skip("Uncomment to write main_test.go files") + + pkgmap := enumeratePackages(t) + for dir, pkg := range pkgmap { + // Write a "main_test.go" file to the package. + // This file is required to run the integration tests. + f, err := os.Create(filepath.Join(dir, "main_test.go")) + require.NoError(t, err) + defer f.Close() + err = mainTestTemplate.Execute(f, pkg) + require.NoError(t, err) + } +} diff --git a/internal/acc/debug.go b/integration/internal/acc/debug.go similarity index 89% rename from internal/acc/debug.go rename to integration/internal/acc/debug.go index 116631132..b4939881e 100644 --- a/internal/acc/debug.go +++ b/integration/internal/acc/debug.go @@ -6,7 +6,8 @@ import ( "path" "path/filepath" "strings" - "testing" + + "github.com/databricks/cli/internal/testutil" ) // Detects if test is run from "debug test" feature in VS Code. @@ -16,7 +17,7 @@ func isInDebug() bool { } // Loads debug environment from ~/.databricks/debug-env.json. -func loadDebugEnvIfRunFromIDE(t *testing.T, key string) { +func loadDebugEnvIfRunFromIDE(t testutil.TestingT, key string) { if !isInDebug() { return } diff --git a/integration/internal/acc/fixtures.go b/integration/internal/acc/fixtures.go new file mode 100644 index 000000000..cd867fb3a --- /dev/null +++ b/integration/internal/acc/fixtures.go @@ -0,0 +1,133 @@ +package acc + +import ( + "fmt" + + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/databricks-sdk-go/service/files" + "github.com/databricks/databricks-sdk-go/service/workspace" + "github.com/stretchr/testify/require" +) + +func TemporaryWorkspaceDir(t *WorkspaceT, name ...string) string { + ctx := t.ctx + me, err := t.W.CurrentUser.Me(ctx) + require.NoError(t, err) + + // Prefix the name with "integration-test-" to make it easier to identify. + name = append([]string{"integration-test-"}, name...) 
+ basePath := fmt.Sprintf("/Users/%s/%s", me.UserName, testutil.RandomName(name...)) + + t.Logf("Creating workspace directory %s", basePath) + err = t.W.Workspace.MkdirsByPath(ctx, basePath) + require.NoError(t, err) + + // Remove test directory on test completion. + t.Cleanup(func() { + t.Logf("Removing workspace directory %s", basePath) + err := t.W.Workspace.Delete(ctx, workspace.Delete{ + Path: basePath, + Recursive: true, + }) + if err == nil || apierr.IsMissing(err) { + return + } + t.Logf("Unable to remove temporary workspace directory %s: %#v", basePath, err) + }) + + return basePath +} + +func TemporaryDbfsDir(t *WorkspaceT, name ...string) string { + ctx := t.ctx + + // Prefix the name with "integration-test-" to make it easier to identify. + name = append([]string{"integration-test-"}, name...) + path := fmt.Sprintf("/tmp/%s", testutil.RandomName(name...)) + + t.Logf("Creating DBFS directory %s", path) + err := t.W.Dbfs.MkdirsByPath(ctx, path) + require.NoError(t, err) + + t.Cleanup(func() { + t.Logf("Removing DBFS directory %s", path) + err := t.W.Dbfs.Delete(ctx, files.Delete{ + Path: path, + Recursive: true, + }) + if err == nil || apierr.IsMissing(err) { + return + } + t.Logf("Unable to remove temporary DBFS directory %s: %#v", path, err) + }) + + return path +} + +func TemporaryRepo(t *WorkspaceT, url string) string { + ctx := t.ctx + me, err := t.W.CurrentUser.Me(ctx) + require.NoError(t, err) + + // Prefix the path with "integration-test-" to make it easier to identify. + path := fmt.Sprintf("/Repos/%s/%s", me.UserName, testutil.RandomName("integration-test-")) + + t.Logf("Creating repo: %s", path) + resp, err := t.W.Repos.Create(ctx, workspace.CreateRepoRequest{ + Url: url, + Path: path, + Provider: "gitHub", + }) + require.NoError(t, err) + + t.Cleanup(func() { + t.Logf("Removing repo: %s", path) + err := t.W.Repos.Delete(ctx, workspace.DeleteRepoRequest{ + RepoId: resp.Id, + }) + if err == nil || apierr.IsMissing(err) { + return + } + t.Logf("Unable to remove repo %s: %#v", path, err) + }) + + return path +} + +// Create a new Unity Catalog volume in a catalog called "main" in the workspace. 
+func TemporaryVolume(t *WorkspaceT) string { + ctx := t.ctx + w := t.W + + // Create a schema + schema, err := w.Schemas.Create(ctx, catalog.CreateSchema{ + CatalogName: "main", + Name: testutil.RandomName("test-schema-"), + }) + require.NoError(t, err) + t.Cleanup(func() { + err := w.Schemas.Delete(ctx, catalog.DeleteSchemaRequest{ + FullName: schema.FullName, + }) + require.NoError(t, err) + }) + + // Create a volume + volume, err := w.Volumes.Create(ctx, catalog.CreateVolumeRequestContent{ + CatalogName: "main", + SchemaName: schema.Name, + Name: "my-volume", + VolumeType: catalog.VolumeTypeManaged, + }) + require.NoError(t, err) + t.Cleanup(func() { + err := w.Volumes.Delete(ctx, catalog.DeleteVolumeRequest{ + Name: volume.FullName, + }) + require.NoError(t, err) + }) + + return fmt.Sprintf("/Volumes/%s/%s/%s", "main", schema.Name, volume.Name) +} diff --git a/internal/acc/workspace.go b/integration/internal/acc/workspace.go similarity index 59% rename from internal/acc/workspace.go rename to integration/internal/acc/workspace.go index 69ab0e715..2f8a5b8e7 100644 --- a/internal/acc/workspace.go +++ b/integration/internal/acc/workspace.go @@ -2,19 +2,16 @@ package acc import ( "context" - "fmt" "os" - "testing" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go" - "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/service/compute" - "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/stretchr/testify/require" ) type WorkspaceT struct { - *testing.T + testutil.TestingT W *databricks.WorkspaceClient @@ -23,16 +20,16 @@ type WorkspaceT struct { exec *compute.CommandExecutorV2 } -func WorkspaceTest(t *testing.T) (context.Context, *WorkspaceT) { +func WorkspaceTest(t testutil.TestingT) (context.Context, *WorkspaceT) { loadDebugEnvIfRunFromIDE(t, "workspace") - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) w, err := databricks.NewWorkspaceClient() require.NoError(t, err) wt := &WorkspaceT{ - T: t, + TestingT: t, W: w, @@ -43,10 +40,10 @@ func WorkspaceTest(t *testing.T) (context.Context, *WorkspaceT) { } // Run the workspace test only on UC workspaces. 
-func UcWorkspaceTest(t *testing.T) (context.Context, *WorkspaceT) { +func UcWorkspaceTest(t testutil.TestingT) (context.Context, *WorkspaceT) { loadDebugEnvIfRunFromIDE(t, "workspace") - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) if os.Getenv("TEST_METASTORE_ID") == "" { t.Skipf("Skipping on non-UC workspaces") @@ -59,7 +56,7 @@ func UcWorkspaceTest(t *testing.T) (context.Context, *WorkspaceT) { require.NoError(t, err) wt := &WorkspaceT{ - T: t, + TestingT: t, W: w, @@ -70,7 +67,7 @@ func UcWorkspaceTest(t *testing.T) (context.Context, *WorkspaceT) { } func (t *WorkspaceT) TestClusterID() string { - clusterID := GetEnvOrSkipTest(t.T, "TEST_BRICKS_CLUSTER_ID") + clusterID := testutil.GetEnvOrSkipTest(t, "TEST_BRICKS_CLUSTER_ID") err := t.W.Clusters.EnsureClusterIsRunning(t.ctx, clusterID) require.NoError(t, err) return clusterID @@ -97,30 +94,3 @@ func (t *WorkspaceT) RunPython(code string) (string, error) { require.True(t, ok, "unexpected type %T", results.Data) return output, nil } - -func (t *WorkspaceT) TemporaryWorkspaceDir(name ...string) string { - ctx := context.Background() - me, err := t.W.CurrentUser.Me(ctx) - require.NoError(t, err) - - basePath := fmt.Sprintf("/Users/%s/%s", me.UserName, RandomName(name...)) - - t.Logf("Creating %s", basePath) - err = t.W.Workspace.MkdirsByPath(ctx, basePath) - require.NoError(t, err) - - // Remove test directory on test completion. - t.Cleanup(func() { - t.Logf("Removing %s", basePath) - err := t.W.Workspace.Delete(ctx, workspace.Delete{ - Path: basePath, - Recursive: true, - }) - if err == nil || apierr.IsMissing(err) { - return - } - t.Logf("Unable to remove temporary workspace directory %s: %#v", basePath, err) - }) - - return basePath -} diff --git a/integration/internal/main.go b/integration/internal/main.go new file mode 100644 index 000000000..6d69dcf70 --- /dev/null +++ b/integration/internal/main.go @@ -0,0 +1,20 @@ +package internal + +import ( + "fmt" + "os" + "testing" +) + +// Main is the entry point for integration tests. +// We use this for all integration tests defined in this subtree to ensure +// they are not inadvertently executed when calling `go test ./...`. 
+func Main(m *testing.M) { + value := os.Getenv("CLOUD_ENV") + if value == "" { + fmt.Println("CLOUD_ENV is not set, skipping integration tests") + return + } + + m.Run() +} diff --git a/internal/filer_test.go b/integration/libs/filer/filer_test.go similarity index 89% rename from internal/filer_test.go rename to integration/libs/filer/filer_test.go index 4e6a15671..766f9817b 100644 --- a/internal/filer_test.go +++ b/integration/libs/filer/filer_test.go @@ -1,4 +1,4 @@ -package internal +package filer_test import ( "bytes" @@ -12,6 +12,7 @@ import ( "strings" "testing" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -22,7 +23,7 @@ type filerTest struct { filer.Filer } -func (f filerTest) assertContents(ctx context.Context, name string, contents string) { +func (f filerTest) assertContents(ctx context.Context, name, contents string) { reader, err := f.Read(ctx, name) if !assert.NoError(f, err) { return @@ -39,7 +40,7 @@ func (f filerTest) assertContents(ctx context.Context, name string, contents str assert.Equal(f, contents, body.String()) } -func (f filerTest) assertContentsJupyter(ctx context.Context, name string, language string) { +func (f filerTest) assertContentsJupyter(ctx context.Context, name, language string) { reader, err := f.Read(ctx, name) if !assert.NoError(f, err) { return @@ -116,12 +117,12 @@ func commonFilerRecursiveDeleteTest(t *testing.T, ctx context.Context, f filer.F assert.ErrorAs(t, err, &filer.NoSuchDirectoryError{}) } -func TestAccFilerRecursiveDelete(t *testing.T) { +func TestFilerRecursiveDelete(t *testing.T) { t.Parallel() for _, testCase := range []struct { name string - f func(t *testing.T) (filer.Filer, string) + f func(t testutil.TestingT) (filer.Filer, string) }{ {"local", setupLocalFiler}, {"workspace files", setupWsfsFiler}, @@ -227,12 +228,12 @@ func commonFilerReadWriteTests(t *testing.T, ctx context.Context, f filer.Filer) assert.True(t, errors.Is(err, fs.ErrInvalid)) } -func TestAccFilerReadWrite(t *testing.T) { +func TestFilerReadWrite(t *testing.T) { t.Parallel() for _, testCase := range []struct { name string - f func(t *testing.T) (filer.Filer, string) + f func(t testutil.TestingT) (filer.Filer, string) }{ {"local", setupLocalFiler}, {"workspace files", setupWsfsFiler}, @@ -336,12 +337,12 @@ func commonFilerReadDirTest(t *testing.T, ctx context.Context, f filer.Filer) { assert.False(t, entries[0].IsDir()) } -func TestAccFilerReadDir(t *testing.T) { +func TestFilerReadDir(t *testing.T) { t.Parallel() for _, testCase := range []struct { name string - f func(t *testing.T) (filer.Filer, string) + f func(t testutil.TestingT) (filer.Filer, string) }{ {"local", setupLocalFiler}, {"workspace files", setupWsfsFiler}, @@ -361,7 +362,7 @@ func TestAccFilerReadDir(t *testing.T) { } } -func TestAccFilerWorkspaceNotebook(t *testing.T) { +func TestFilerWorkspaceNotebook(t *testing.T) { t.Parallel() ctx := context.Background() @@ -410,33 +411,33 @@ func TestAccFilerWorkspaceNotebook(t *testing.T) { { name: "pythonJupyterNb.ipynb", nameWithoutExt: "pythonJupyterNb", - content1: readFile(t, "testdata/notebooks/py1.ipynb"), + content1: testutil.ReadFile(t, "testdata/notebooks/py1.ipynb"), expected1: "# Databricks notebook source\nprint(1)", - content2: readFile(t, "testdata/notebooks/py2.ipynb"), + content2: testutil.ReadFile(t, "testdata/notebooks/py2.ipynb"), expected2: "# Databricks notebook source\nprint(2)", }, { name: "rJupyterNb.ipynb", 
nameWithoutExt: "rJupyterNb", - content1: readFile(t, "testdata/notebooks/r1.ipynb"), + content1: testutil.ReadFile(t, "testdata/notebooks/r1.ipynb"), expected1: "# Databricks notebook source\nprint(1)", - content2: readFile(t, "testdata/notebooks/r2.ipynb"), + content2: testutil.ReadFile(t, "testdata/notebooks/r2.ipynb"), expected2: "# Databricks notebook source\nprint(2)", }, { name: "scalaJupyterNb.ipynb", nameWithoutExt: "scalaJupyterNb", - content1: readFile(t, "testdata/notebooks/scala1.ipynb"), + content1: testutil.ReadFile(t, "testdata/notebooks/scala1.ipynb"), expected1: "// Databricks notebook source\nprintln(1)", - content2: readFile(t, "testdata/notebooks/scala2.ipynb"), + content2: testutil.ReadFile(t, "testdata/notebooks/scala2.ipynb"), expected2: "// Databricks notebook source\nprintln(2)", }, { name: "sqlJupyterNotebook.ipynb", nameWithoutExt: "sqlJupyterNotebook", - content1: readFile(t, "testdata/notebooks/sql1.ipynb"), + content1: testutil.ReadFile(t, "testdata/notebooks/sql1.ipynb"), expected1: "-- Databricks notebook source\nselect 1", - content2: readFile(t, "testdata/notebooks/sql2.ipynb"), + content2: testutil.ReadFile(t, "testdata/notebooks/sql2.ipynb"), expected2: "-- Databricks notebook source\nselect 2", }, } @@ -468,10 +469,9 @@ func TestAccFilerWorkspaceNotebook(t *testing.T) { filerTest{t, f}.assertContents(ctx, tc.nameWithoutExt, tc.expected2) }) } - } -func TestAccFilerWorkspaceFilesExtensionsReadDir(t *testing.T) { +func TestFilerWorkspaceFilesExtensionsReadDir(t *testing.T) { t.Parallel() files := []struct { @@ -484,13 +484,13 @@ func TestAccFilerWorkspaceFilesExtensionsReadDir(t *testing.T) { {"foo.r", "print('foo')"}, {"foo.scala", "println('foo')"}, {"foo.sql", "SELECT 'foo'"}, - {"py1.ipynb", readFile(t, "testdata/notebooks/py1.ipynb")}, + {"py1.ipynb", testutil.ReadFile(t, "testdata/notebooks/py1.ipynb")}, {"pyNb.py", "# Databricks notebook source\nprint('first upload'))"}, - {"r1.ipynb", readFile(t, "testdata/notebooks/r1.ipynb")}, + {"r1.ipynb", testutil.ReadFile(t, "testdata/notebooks/r1.ipynb")}, {"rNb.r", "# Databricks notebook source\nprint('first upload'))"}, - {"scala1.ipynb", readFile(t, "testdata/notebooks/scala1.ipynb")}, + {"scala1.ipynb", testutil.ReadFile(t, "testdata/notebooks/scala1.ipynb")}, {"scalaNb.scala", "// Databricks notebook source\n println(\"first upload\"))"}, - {"sql1.ipynb", readFile(t, "testdata/notebooks/sql1.ipynb")}, + {"sql1.ipynb", testutil.ReadFile(t, "testdata/notebooks/sql1.ipynb")}, {"sqlNb.sql", "-- Databricks notebook source\n SELECT \"first upload\""}, } @@ -555,10 +555,10 @@ func setupFilerWithExtensionsTest(t *testing.T) filer.Filer { }{ {"foo.py", "# Databricks notebook source\nprint('first upload'))"}, {"bar.py", "print('foo')"}, - {"p1.ipynb", readFile(t, "testdata/notebooks/py1.ipynb")}, - {"r1.ipynb", readFile(t, "testdata/notebooks/r1.ipynb")}, - {"scala1.ipynb", readFile(t, "testdata/notebooks/scala1.ipynb")}, - {"sql1.ipynb", readFile(t, "testdata/notebooks/sql1.ipynb")}, + {"p1.ipynb", testutil.ReadFile(t, "testdata/notebooks/py1.ipynb")}, + {"r1.ipynb", testutil.ReadFile(t, "testdata/notebooks/r1.ipynb")}, + {"scala1.ipynb", testutil.ReadFile(t, "testdata/notebooks/scala1.ipynb")}, + {"sql1.ipynb", testutil.ReadFile(t, "testdata/notebooks/sql1.ipynb")}, {"pretender", "not a notebook"}, {"dir/file.txt", "file content"}, {"scala-notebook.scala", "// Databricks notebook source\nprintln('first upload')"}, @@ -575,7 +575,7 @@ func setupFilerWithExtensionsTest(t *testing.T) filer.Filer { return wf } 
-func TestAccFilerWorkspaceFilesExtensionsRead(t *testing.T) { +func TestFilerWorkspaceFilesExtensionsRead(t *testing.T) { t.Parallel() ctx := context.Background() @@ -612,7 +612,7 @@ func TestAccFilerWorkspaceFilesExtensionsRead(t *testing.T) { assert.ErrorIs(t, err, fs.ErrNotExist) } -func TestAccFilerWorkspaceFilesExtensionsDelete(t *testing.T) { +func TestFilerWorkspaceFilesExtensionsDelete(t *testing.T) { t.Parallel() ctx := context.Background() @@ -661,7 +661,7 @@ func TestAccFilerWorkspaceFilesExtensionsDelete(t *testing.T) { filerTest{t, wf}.assertNotExists(ctx, "dir") } -func TestAccFilerWorkspaceFilesExtensionsStat(t *testing.T) { +func TestFilerWorkspaceFilesExtensionsStat(t *testing.T) { t.Parallel() ctx := context.Background() @@ -708,7 +708,7 @@ func TestAccFilerWorkspaceFilesExtensionsStat(t *testing.T) { } } -func TestAccWorkspaceFilesExtensionsDirectoriesAreNotNotebooks(t *testing.T) { +func TestWorkspaceFilesExtensionsDirectoriesAreNotNotebooks(t *testing.T) { t.Parallel() ctx := context.Background() @@ -723,14 +723,14 @@ func TestAccWorkspaceFilesExtensionsDirectoriesAreNotNotebooks(t *testing.T) { assert.ErrorIs(t, err, fs.ErrNotExist) } -func TestAccWorkspaceFilesExtensionsNotebooksAreNotReadAsFiles(t *testing.T) { +func TestWorkspaceFilesExtensionsNotebooksAreNotReadAsFiles(t *testing.T) { t.Parallel() ctx := context.Background() wf, _ := setupWsfsExtensionsFiler(t) // Create a notebook - err := wf.Write(ctx, "foo.ipynb", strings.NewReader(readFile(t, "testdata/notebooks/py1.ipynb"))) + err := wf.Write(ctx, "foo.ipynb", strings.NewReader(testutil.ReadFile(t, "testdata/notebooks/py1.ipynb"))) require.NoError(t, err) // Reading foo should fail. Even though the WSFS name for the notebook is foo @@ -742,14 +742,14 @@ func TestAccWorkspaceFilesExtensionsNotebooksAreNotReadAsFiles(t *testing.T) { assert.NoError(t, err) } -func TestAccWorkspaceFilesExtensionsNotebooksAreNotStatAsFiles(t *testing.T) { +func TestWorkspaceFilesExtensionsNotebooksAreNotStatAsFiles(t *testing.T) { t.Parallel() ctx := context.Background() wf, _ := setupWsfsExtensionsFiler(t) // Create a notebook - err := wf.Write(ctx, "foo.ipynb", strings.NewReader(readFile(t, "testdata/notebooks/py1.ipynb"))) + err := wf.Write(ctx, "foo.ipynb", strings.NewReader(testutil.ReadFile(t, "testdata/notebooks/py1.ipynb"))) require.NoError(t, err) // Stating foo should fail. Even though the WSFS name for the notebook is foo @@ -761,14 +761,14 @@ func TestAccWorkspaceFilesExtensionsNotebooksAreNotStatAsFiles(t *testing.T) { assert.NoError(t, err) } -func TestAccWorkspaceFilesExtensionsNotebooksAreNotDeletedAsFiles(t *testing.T) { +func TestWorkspaceFilesExtensionsNotebooksAreNotDeletedAsFiles(t *testing.T) { t.Parallel() ctx := context.Background() wf, _ := setupWsfsExtensionsFiler(t) // Create a notebook - err := wf.Write(ctx, "foo.ipynb", strings.NewReader(readFile(t, "testdata/notebooks/py1.ipynb"))) + err := wf.Write(ctx, "foo.ipynb", strings.NewReader(testutil.ReadFile(t, "testdata/notebooks/py1.ipynb"))) require.NoError(t, err) // Deleting foo should fail. Even though the WSFS name for the notebook is foo @@ -780,7 +780,7 @@ func TestAccWorkspaceFilesExtensionsNotebooksAreNotDeletedAsFiles(t *testing.T) assert.NoError(t, err) } -func TestAccWorkspaceFilesExtensions_ExportFormatIsPreserved(t *testing.T) { +func TestWorkspaceFilesExtensions_ExportFormatIsPreserved(t *testing.T) { t.Parallel() // Case 1: Writing source notebooks. 
@@ -850,25 +850,25 @@ func TestAccWorkspaceFilesExtensions_ExportFormatIsPreserved(t *testing.T) { language: "python", sourceName: "foo.py", jupyterName: "foo.ipynb", - jupyterContent: readFile(t, "testdata/notebooks/py1.ipynb"), + jupyterContent: testutil.ReadFile(t, "testdata/notebooks/py1.ipynb"), }, { language: "r", sourceName: "foo.r", jupyterName: "foo.ipynb", - jupyterContent: readFile(t, "testdata/notebooks/r1.ipynb"), + jupyterContent: testutil.ReadFile(t, "testdata/notebooks/r1.ipynb"), }, { language: "scala", sourceName: "foo.scala", jupyterName: "foo.ipynb", - jupyterContent: readFile(t, "testdata/notebooks/scala1.ipynb"), + jupyterContent: testutil.ReadFile(t, "testdata/notebooks/scala1.ipynb"), }, { language: "sql", sourceName: "foo.sql", jupyterName: "foo.ipynb", - jupyterContent: readFile(t, "testdata/notebooks/sql1.ipynb"), + jupyterContent: testutil.ReadFile(t, "testdata/notebooks/sql1.ipynb"), }, } { t.Run("jupyter_"+tc.language, func(t *testing.T) { diff --git a/integration/libs/filer/helpers_test.go b/integration/libs/filer/helpers_test.go new file mode 100644 index 000000000..a3a3aaae5 --- /dev/null +++ b/integration/libs/filer/helpers_test.go @@ -0,0 +1,73 @@ +package filer_test + +import ( + "errors" + "net/http" + "os" + "path" + "path/filepath" + + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" + + "github.com/databricks/cli/libs/filer" + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/stretchr/testify/require" +) + +func setupLocalFiler(t testutil.TestingT) (filer.Filer, string) { + tmp := t.TempDir() + f, err := filer.NewLocalClient(tmp) + require.NoError(t, err) + + return f, path.Join(filepath.ToSlash(tmp)) +} + +func setupWsfsFiler(t testutil.TestingT) (filer.Filer, string) { + ctx, wt := acc.WorkspaceTest(t) + + tmpdir := acc.TemporaryWorkspaceDir(wt) + f, err := filer.NewWorkspaceFilesClient(wt.W, tmpdir) + require.NoError(t, err) + + // Check if we can use this API here, skip test if we cannot. + _, err = f.Read(ctx, "we_use_this_call_to_test_if_this_api_is_enabled") + var aerr *apierr.APIError + if errors.As(err, &aerr) && aerr.StatusCode == http.StatusBadRequest { + t.Skip(aerr.Message) + } + + return f, tmpdir +} + +func setupWsfsExtensionsFiler(t testutil.TestingT) (filer.Filer, string) { + _, wt := acc.WorkspaceTest(t) + + tmpdir := acc.TemporaryWorkspaceDir(wt) + f, err := filer.NewWorkspaceFilesExtensionsClient(wt.W, tmpdir) + require.NoError(t, err) + return f, tmpdir +} + +func setupDbfsFiler(t testutil.TestingT) (filer.Filer, string) { + _, wt := acc.WorkspaceTest(t) + + tmpdir := acc.TemporaryDbfsDir(wt) + f, err := filer.NewDbfsClient(wt.W, tmpdir) + require.NoError(t, err) + return f, path.Join("dbfs:/", tmpdir) +} + +func setupUcVolumesFiler(t testutil.TestingT) (filer.Filer, string) { + _, wt := acc.WorkspaceTest(t) + + if os.Getenv("TEST_METASTORE_ID") == "" { + t.Skip("Skipping tests that require a UC Volume when metastore id is not set.") + } + + tmpdir := acc.TemporaryVolume(wt) + f, err := filer.NewFilesClient(wt.W, tmpdir) + require.NoError(t, err) + + return f, path.Join("dbfs:/", tmpdir) +} diff --git a/integration/libs/filer/main_test.go b/integration/libs/filer/main_test.go new file mode 100644 index 000000000..ca866d952 --- /dev/null +++ b/integration/libs/filer/main_test.go @@ -0,0 +1,13 @@ +package filer_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. 
+// See [internal.Main] for prerequisites for running integration tests. +func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/testdata/notebooks/py1.ipynb b/integration/libs/filer/testdata/notebooks/py1.ipynb similarity index 100% rename from internal/testdata/notebooks/py1.ipynb rename to integration/libs/filer/testdata/notebooks/py1.ipynb diff --git a/internal/testdata/notebooks/py2.ipynb b/integration/libs/filer/testdata/notebooks/py2.ipynb similarity index 100% rename from internal/testdata/notebooks/py2.ipynb rename to integration/libs/filer/testdata/notebooks/py2.ipynb diff --git a/internal/testdata/notebooks/r1.ipynb b/integration/libs/filer/testdata/notebooks/r1.ipynb similarity index 100% rename from internal/testdata/notebooks/r1.ipynb rename to integration/libs/filer/testdata/notebooks/r1.ipynb diff --git a/internal/testdata/notebooks/r2.ipynb b/integration/libs/filer/testdata/notebooks/r2.ipynb similarity index 100% rename from internal/testdata/notebooks/r2.ipynb rename to integration/libs/filer/testdata/notebooks/r2.ipynb diff --git a/internal/testdata/notebooks/scala1.ipynb b/integration/libs/filer/testdata/notebooks/scala1.ipynb similarity index 100% rename from internal/testdata/notebooks/scala1.ipynb rename to integration/libs/filer/testdata/notebooks/scala1.ipynb diff --git a/internal/testdata/notebooks/scala2.ipynb b/integration/libs/filer/testdata/notebooks/scala2.ipynb similarity index 100% rename from internal/testdata/notebooks/scala2.ipynb rename to integration/libs/filer/testdata/notebooks/scala2.ipynb diff --git a/internal/testdata/notebooks/sql1.ipynb b/integration/libs/filer/testdata/notebooks/sql1.ipynb similarity index 100% rename from internal/testdata/notebooks/sql1.ipynb rename to integration/libs/filer/testdata/notebooks/sql1.ipynb diff --git a/internal/testdata/notebooks/sql2.ipynb b/integration/libs/filer/testdata/notebooks/sql2.ipynb similarity index 100% rename from internal/testdata/notebooks/sql2.ipynb rename to integration/libs/filer/testdata/notebooks/sql2.ipynb diff --git a/internal/git_clone_test.go b/integration/libs/git/git_clone_test.go similarity index 83% rename from internal/git_clone_test.go rename to integration/libs/git/git_clone_test.go index 73c3db105..cbc2d091d 100644 --- a/internal/git_clone_test.go +++ b/integration/libs/git/git_clone_test.go @@ -1,4 +1,4 @@ -package internal +package git_test import ( "context" @@ -10,9 +10,7 @@ import ( "github.com/stretchr/testify/assert" ) -func TestAccGitClone(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - +func TestGitClone(t *testing.T) { tmpDir := t.TempDir() ctx := context.Background() var err error @@ -32,9 +30,7 @@ func TestAccGitClone(t *testing.T) { assert.Contains(t, string(b), "ide") } -func TestAccGitCloneOnNonDefaultBranch(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - +func TestGitCloneOnNonDefaultBranch(t *testing.T) { tmpDir := t.TempDir() ctx := context.Background() var err error @@ -53,9 +49,7 @@ func TestAccGitCloneOnNonDefaultBranch(t *testing.T) { assert.Contains(t, string(b), "dais-2022") } -func TestAccGitCloneErrorsWhenRepositoryDoesNotExist(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - +func TestGitCloneErrorsWhenRepositoryDoesNotExist(t *testing.T) { tmpDir := t.TempDir() err := git.Clone(context.Background(), "https://github.com/monalisa/doesnot-exist.git", "", tmpDir) diff --git a/internal/git_fetch_test.go b/integration/libs/git/git_fetch_test.go similarity index 68% rename from internal/git_fetch_test.go 
rename to integration/libs/git/git_fetch_test.go index 5dab6be76..0998d775b 100644 --- a/internal/git_fetch_test.go +++ b/integration/libs/git/git_fetch_test.go @@ -1,21 +1,24 @@ -package internal +package git_test import ( "os" "os/exec" "path" "path/filepath" + "strings" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/libs/dbr" "github.com/databricks/cli/libs/git" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -const examplesRepoUrl = "https://github.com/databricks/bundle-examples" -const examplesRepoProvider = "gitHub" +const ( + examplesRepoUrl = "https://github.com/databricks/bundle-examples" + examplesRepoProvider = "gitHub" +) func assertFullGitInfo(t *testing.T, expectedRoot string, info git.RepositoryInfo) { assert.Equal(t, "main", info.CurrentBranch) @@ -35,19 +38,18 @@ func assertSparseGitInfo(t *testing.T, expectedRoot string, info git.RepositoryI assert.Equal(t, expectedRoot, info.WorktreeRoot) } -func TestAccFetchRepositoryInfoAPI_FromRepo(t *testing.T) { +func ensureWorkspacePrefix(root string) string { + // The fixture helper doesn't include /Workspace, so include it here. + if !strings.HasPrefix(root, "/Workspace/") { + return path.Join("/Workspace", root) + } + return root +} + +func TestFetchRepositoryInfoAPI_FromRepo(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - me, err := wt.W.CurrentUser.Me(ctx) - require.NoError(t, err) + targetPath := ensureWorkspacePrefix(acc.TemporaryRepo(wt, examplesRepoUrl)) - targetPath := acc.RandomName(path.Join("/Workspace/Users", me.UserName, "/testing-clone-bundle-examples-")) - stdout, stderr := RequireSuccessfulRun(t, "repos", "create", examplesRepoUrl, examplesRepoProvider, "--path", targetPath) - t.Cleanup(func() { - RequireSuccessfulRun(t, "repos", "delete", targetPath) - }) - - assert.Empty(t, stderr.String()) - assert.NotEmpty(t, stdout.String()) ctx = dbr.MockRuntime(ctx, true) for _, inputPath := range []string{ @@ -62,18 +64,14 @@ func TestAccFetchRepositoryInfoAPI_FromRepo(t *testing.T) { } } -func TestAccFetchRepositoryInfoAPI_FromNonRepo(t *testing.T) { +func TestFetchRepositoryInfoAPI_FromNonRepo(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - me, err := wt.W.CurrentUser.Me(ctx) + rootPath := ensureWorkspacePrefix(acc.TemporaryWorkspaceDir(wt, "testing-nonrepo-")) + + // Create directory inside this root path (this is cleaned up as part of the root path). 
+ err := wt.W.Workspace.MkdirsByPath(ctx, path.Join(rootPath, "a/b/c")) require.NoError(t, err) - rootPath := acc.RandomName(path.Join("/Workspace/Users", me.UserName, "testing-nonrepo-")) - _, stderr := RequireSuccessfulRun(t, "workspace", "mkdirs", path.Join(rootPath, "a/b/c")) - t.Cleanup(func() { - RequireSuccessfulRun(t, "workspace", "delete", "--recursive", rootPath) - }) - - assert.Empty(t, stderr.String()) ctx = dbr.MockRuntime(ctx, true) tests := []struct { @@ -101,14 +99,14 @@ func TestAccFetchRepositoryInfoAPI_FromNonRepo(t *testing.T) { assert.NoError(t, err) } else { assert.Error(t, err) - assert.Contains(t, err.Error(), test.msg) + assert.ErrorContains(t, err, test.msg) } assertEmptyGitInfo(t, info) }) } } -func TestAccFetchRepositoryInfoDotGit_FromGitRepo(t *testing.T) { +func TestFetchRepositoryInfoDotGit_FromGitRepo(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) repo := cloneRepoLocally(t, examplesRepoUrl) @@ -135,12 +133,12 @@ func cloneRepoLocally(t *testing.T, repoUrl string) string { return localRoot } -func TestAccFetchRepositoryInfoDotGit_FromNonGitRepo(t *testing.T) { +func TestFetchRepositoryInfoDotGit_FromNonGitRepo(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) tempDir := t.TempDir() root := filepath.Join(tempDir, "repo") - require.NoError(t, os.MkdirAll(filepath.Join(root, "a/b/c"), 0700)) + require.NoError(t, os.MkdirAll(filepath.Join(root, "a/b/c"), 0o700)) tests := []string{ filepath.Join(root, "a/b/c"), @@ -151,20 +149,20 @@ func TestAccFetchRepositoryInfoDotGit_FromNonGitRepo(t *testing.T) { for _, input := range tests { t.Run(input, func(t *testing.T) { info, err := git.FetchRepositoryInfo(ctx, input, wt.W) - assert.NoError(t, err) + assert.ErrorIs(t, err, os.ErrNotExist) assertEmptyGitInfo(t, info) }) } } -func TestAccFetchRepositoryInfoDotGit_FromBrokenGitRepo(t *testing.T) { +func TestFetchRepositoryInfoDotGit_FromBrokenGitRepo(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) tempDir := t.TempDir() root := filepath.Join(tempDir, "repo") path := filepath.Join(root, "a/b/c") - require.NoError(t, os.MkdirAll(path, 0700)) - require.NoError(t, os.WriteFile(filepath.Join(root, ".git"), []byte(""), 0000)) + require.NoError(t, os.MkdirAll(path, 0o700)) + require.NoError(t, os.WriteFile(filepath.Join(root, ".git"), []byte(""), 0o000)) info, err := git.FetchRepositoryInfo(ctx, path, wt.W) assert.NoError(t, err) diff --git a/integration/libs/git/main_test.go b/integration/libs/git/main_test.go new file mode 100644 index 000000000..5d68e0851 --- /dev/null +++ b/integration/libs/git/main_test.go @@ -0,0 +1,13 @@ +package git_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. 
+func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/locker_test.go b/integration/libs/locker/locker_test.go similarity index 88% rename from internal/locker_test.go rename to integration/libs/locker/locker_test.go index 3ae783d1b..c51972b90 100644 --- a/internal/locker_test.go +++ b/integration/libs/locker/locker_test.go @@ -1,4 +1,4 @@ -package internal +package locker_test import ( "context" @@ -11,6 +11,8 @@ import ( "testing" "time" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" lockpkg "github.com/databricks/cli/libs/locker" "github.com/databricks/databricks-sdk-go" @@ -28,7 +30,7 @@ func createRemoteTestProject(t *testing.T, projectNamePrefix string, wsc *databr me, err := wsc.CurrentUser.Me(ctx) assert.NoError(t, err) - remoteProjectRoot := fmt.Sprintf("/Repos/%s/%s", me.UserName, RandomName(projectNamePrefix)) + remoteProjectRoot := fmt.Sprintf("/Repos/%s/%s", me.UserName, testutil.RandomName(projectNamePrefix)) repoInfo, err := wsc.Repos.Create(ctx, workspace.CreateRepoRequest{ Path: remoteProjectRoot, Url: EmptyRepoUrl, @@ -43,11 +45,9 @@ func createRemoteTestProject(t *testing.T, projectNamePrefix string, wsc *databr return remoteProjectRoot } -func TestAccLock(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - ctx := context.TODO() - wsc, err := databricks.NewWorkspaceClient() - require.NoError(t, err) +func TestLock(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + wsc := wt.W remoteProjectRoot := createRemoteTestProject(t, "lock-acc-", wsc) // 5 lockers try to acquire a lock at the same time @@ -133,7 +133,8 @@ func TestAccLock(t *testing.T) { // assert on active locker content var res map[string]string - json.Unmarshal(b, &res) + err = json.Unmarshal(b, &res) + require.NoError(t, err) assert.NoError(t, err) assert.Equal(t, "Khan", res["surname"]) assert.Equal(t, "Shah Rukh", res["name"]) @@ -162,14 +163,12 @@ func TestAccLock(t *testing.T) { assert.True(t, lockers[indexOfAnInactiveLocker].Active) } -func setupLockerTest(ctx context.Context, t *testing.T) (*lockpkg.Locker, filer.Filer) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) +func setupLockerTest(t *testing.T) (context.Context, *lockpkg.Locker, filer.Filer) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W // create temp wsfs dir - tmpDir := TemporaryWorkspaceDir(t, w) + tmpDir := acc.TemporaryWorkspaceDir(wt, "locker-") f, err := filer.NewWorkspaceFilesClient(w, tmpDir) require.NoError(t, err) @@ -177,12 +176,11 @@ func setupLockerTest(ctx context.Context, t *testing.T) (*lockpkg.Locker, filer. 
locker, err := lockpkg.CreateLocker("redfoo@databricks.com", tmpDir, w) require.NoError(t, err) - return locker, f + return ctx, locker, f } -func TestAccLockUnlockWithoutAllowsLockFileNotExist(t *testing.T) { - ctx := context.Background() - locker, f := setupLockerTest(ctx, t) +func TestLockUnlockWithoutAllowsLockFileNotExist(t *testing.T) { + ctx, locker, f := setupLockerTest(t) var err error // Acquire lock on tmp directory @@ -202,9 +200,8 @@ func TestAccLockUnlockWithoutAllowsLockFileNotExist(t *testing.T) { assert.ErrorIs(t, err, fs.ErrNotExist) } -func TestAccLockUnlockWithAllowsLockFileNotExist(t *testing.T) { - ctx := context.Background() - locker, f := setupLockerTest(ctx, t) +func TestLockUnlockWithAllowsLockFileNotExist(t *testing.T) { + ctx, locker, f := setupLockerTest(t) var err error // Acquire lock on tmp directory diff --git a/integration/libs/locker/main_test.go b/integration/libs/locker/main_test.go new file mode 100644 index 000000000..33a883768 --- /dev/null +++ b/integration/libs/locker/main_test.go @@ -0,0 +1,13 @@ +package locker_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. +func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/integration/libs/tags/main_test.go b/integration/libs/tags/main_test.go new file mode 100644 index 000000000..4eaf54a20 --- /dev/null +++ b/integration/libs/tags/main_test.go @@ -0,0 +1,13 @@ +package tags_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. 
+func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/tags_test.go b/integration/libs/tags/tags_test.go similarity index 84% rename from internal/tags_test.go rename to integration/libs/tags/tags_test.go index 2dd3759ac..8a54a966b 100644 --- a/internal/tags_test.go +++ b/integration/libs/tags/tags_test.go @@ -1,41 +1,27 @@ -package internal +package tags_test import ( - "context" "strings" "testing" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" - "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/require" ) func testTags(t *testing.T, tags map[string]string) error { - var nodeTypeId string - switch testutil.GetCloud(t) { - case testutil.AWS: - nodeTypeId = "i3.xlarge" - case testutil.Azure: - nodeTypeId = "Standard_DS4_v2" - case testutil.GCP: - nodeTypeId = "n1-standard-4" - } - - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - ctx := context.Background() - resp, err := w.Jobs.Create(ctx, jobs.CreateJob{ - Name: RandomName("test-tags-"), + ctx, wt := acc.WorkspaceTest(t) + resp, err := wt.W.Jobs.Create(ctx, jobs.CreateJob{ + Name: testutil.RandomName("test-tags-"), Tasks: []jobs.Task{ { TaskKey: "test", NewCluster: &compute.ClusterSpec{ SparkVersion: "13.3.x-scala2.12", NumWorkers: 1, - NodeTypeId: nodeTypeId, + NodeTypeId: testutil.GetCloud(t).NodeTypeID(), }, SparkPythonTask: &jobs.SparkPythonTask{ PythonFile: "/doesnt_exist.py", @@ -47,7 +33,11 @@ func testTags(t *testing.T, tags map[string]string) error { if resp != nil { t.Cleanup(func() { - w.Jobs.DeleteByJobId(ctx, resp.JobId) + _ = wt.W.Jobs.DeleteByJobId(ctx, resp.JobId) + // Cannot enable errchecking there, tests fail with: + // Error: Received unexpected error: + // Job 0 does not exist. + // require.NoError(t, err) }) } @@ -90,7 +80,7 @@ func runTagTestCases(t *testing.T, cases []tagTestCase) { } } -func TestAccTagKeyAWS(t *testing.T) { +func TestTagKeyAWS(t *testing.T) { testutil.Require(t, testutil.AWS) t.Parallel() @@ -122,7 +112,7 @@ func TestAccTagKeyAWS(t *testing.T) { }) } -func TestAccTagValueAWS(t *testing.T) { +func TestTagValueAWS(t *testing.T) { testutil.Require(t, testutil.AWS) t.Parallel() @@ -148,7 +138,7 @@ func TestAccTagValueAWS(t *testing.T) { }) } -func TestAccTagKeyAzure(t *testing.T) { +func TestTagKeyAzure(t *testing.T) { testutil.Require(t, testutil.Azure) t.Parallel() @@ -180,7 +170,7 @@ func TestAccTagKeyAzure(t *testing.T) { }) } -func TestAccTagValueAzure(t *testing.T) { +func TestTagValueAzure(t *testing.T) { testutil.Require(t, testutil.Azure) t.Parallel() @@ -200,7 +190,7 @@ func TestAccTagValueAzure(t *testing.T) { }) } -func TestAccTagKeyGCP(t *testing.T) { +func TestTagKeyGCP(t *testing.T) { testutil.Require(t, testutil.GCP) t.Parallel() @@ -232,7 +222,7 @@ func TestAccTagKeyGCP(t *testing.T) { }) } -func TestAccTagValueGCP(t *testing.T) { +func TestTagValueGCP(t *testing.T) { testutil.Require(t, testutil.GCP) t.Parallel() diff --git a/integration/python/main_test.go b/integration/python/main_test.go new file mode 100644 index 000000000..b35da21e1 --- /dev/null +++ b/integration/python/main_test.go @@ -0,0 +1,13 @@ +package python_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. 
+func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/python/python_tasks_test.go b/integration/python/python_tasks_test.go similarity index 64% rename from internal/python/python_tasks_test.go rename to integration/python/python_tasks_test.go index fde9b37f6..9ad3ed5de 100644 --- a/internal/python/python_tasks_test.go +++ b/integration/python/python_tasks_test.go @@ -1,4 +1,4 @@ -package python +package python_test import ( "bytes" @@ -14,9 +14,11 @@ import ( "time" "github.com/databricks/cli/bundle/run/output" - "github.com/databricks/cli/internal" + "github.com/databricks/cli/integration/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/stretchr/testify/require" @@ -73,10 +75,9 @@ var sparkVersions = []string{ "14.1.x-scala2.12", } -func TestAccRunPythonTaskWorkspace(t *testing.T) { +func TestRunPythonTaskWorkspace(t *testing.T) { // TODO: remove RUN_PYTHON_TASKS_TEST when ready to be executed as part of nightly - internal.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") - internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + testutil.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") unsupportedSparkVersionsForWheel := []string{ "11.3.x-scala2.12", @@ -94,10 +95,9 @@ func TestAccRunPythonTaskWorkspace(t *testing.T) { }) } -func TestAccRunPythonTaskDBFS(t *testing.T) { +func TestRunPythonTaskDBFS(t *testing.T) { // TODO: remove RUN_PYTHON_TASKS_TEST when ready to be executed as part of nightly - internal.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") - internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + testutil.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") runPythonTasks(t, prepareDBFSFiles(t), testOpts{ name: "Python tasks from DBFS", @@ -107,10 +107,9 @@ func TestAccRunPythonTaskDBFS(t *testing.T) { }) } -func TestAccRunPythonTaskRepo(t *testing.T) { +func TestRunPythonTaskRepo(t *testing.T) { // TODO: remove RUN_PYTHON_TASKS_TEST when ready to be executed as part of nightly - internal.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") - internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + testutil.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") runPythonTasks(t, prepareRepoFiles(t), testOpts{ name: "Python tasks from Repo", @@ -121,19 +120,16 @@ func TestAccRunPythonTaskRepo(t *testing.T) { } func runPythonTasks(t *testing.T, tw *testFiles, opts testOpts) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) - w := tw.w - nodeTypeId := internal.GetNodeTypeId(env) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() tasks := make([]jobs.SubmitTask, 0) if opts.includeNotebookTasks { - tasks = append(tasks, internal.GenerateNotebookTasks(tw.pyNotebookPath, sparkVersions, nodeTypeId)...) + tasks = append(tasks, GenerateNotebookTasks(tw.pyNotebookPath, sparkVersions, nodeTypeId)...) } if opts.includeSparkPythonTasks { - tasks = append(tasks, internal.GenerateSparkPythonTasks(tw.sparkPythonPath, sparkVersions, nodeTypeId)...) + tasks = append(tasks, GenerateSparkPythonTasks(tw.sparkPythonPath, sparkVersions, nodeTypeId)...) } if opts.includeWheelTasks { @@ -141,7 +137,7 @@ func runPythonTasks(t *testing.T, tw *testFiles, opts testOpts) { if len(opts.wheelSparkVersions) > 0 { versions = opts.wheelSparkVersions } - tasks = append(tasks, internal.GenerateWheelTasks(tw.wheelPath, versions, nodeTypeId)...) 
+ tasks = append(tasks, GenerateWheelTasks(tw.wheelPath, versions, nodeTypeId)...) } ctx := context.Background() @@ -178,13 +174,13 @@ func runPythonTasks(t *testing.T, tw *testFiles, opts testOpts) { } func prepareWorkspaceFiles(t *testing.T) *testFiles { - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + var err error + ctx, wt := acc.WorkspaceTest(t) + w := wt.W + + baseDir := acc.TemporaryWorkspaceDir(wt, "python-tasks-") - baseDir := internal.TemporaryWorkspaceDir(t, w) pyNotebookPath := path.Join(baseDir, "test.py") - err = w.Workspace.Import(ctx, workspace.Import{ Path: pyNotebookPath, Overwrite: true, @@ -224,11 +220,12 @@ func prepareWorkspaceFiles(t *testing.T) *testFiles { } func prepareDBFSFiles(t *testing.T) *testFiles { - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + var err error + ctx, wt := acc.WorkspaceTest(t) + w := wt.W + + baseDir := acc.TemporaryDbfsDir(wt, "python-tasks-") - baseDir := internal.TemporaryDbfsDir(t, w) f, err := filer.NewDbfsClient(w, baseDir) require.NoError(t, err) @@ -253,15 +250,83 @@ func prepareDBFSFiles(t *testing.T) *testFiles { } func prepareRepoFiles(t *testing.T) *testFiles { - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + _, wt := acc.WorkspaceTest(t) + w := wt.W + + baseDir := acc.TemporaryRepo(wt, "https://github.com/databricks/cli") - repo := internal.TemporaryRepo(t, w) packagePath := "internal/python/testdata" return &testFiles{ w: w, - pyNotebookPath: path.Join(repo, packagePath, "test"), - sparkPythonPath: path.Join(repo, packagePath, "spark.py"), - wheelPath: path.Join(repo, packagePath, "my_test_code-0.0.1-py3-none-any.whl"), + pyNotebookPath: path.Join(baseDir, packagePath, "test"), + sparkPythonPath: path.Join(baseDir, packagePath, "spark.py"), + wheelPath: path.Join(baseDir, packagePath, "my_test_code-0.0.1-py3-none-any.whl"), } } + +func GenerateNotebookTasks(notebookPath string, versions []string, nodeTypeId string) []jobs.SubmitTask { + tasks := make([]jobs.SubmitTask, 0) + for i := 0; i < len(versions); i++ { + task := jobs.SubmitTask{ + TaskKey: fmt.Sprintf("notebook_%s", strings.ReplaceAll(versions[i], ".", "_")), + NotebookTask: &jobs.NotebookTask{ + NotebookPath: notebookPath, + }, + NewCluster: &compute.ClusterSpec{ + SparkVersion: versions[i], + NumWorkers: 1, + NodeTypeId: nodeTypeId, + DataSecurityMode: compute.DataSecurityModeUserIsolation, + }, + } + tasks = append(tasks, task) + } + + return tasks +} + +func GenerateSparkPythonTasks(notebookPath string, versions []string, nodeTypeId string) []jobs.SubmitTask { + tasks := make([]jobs.SubmitTask, 0) + for i := 0; i < len(versions); i++ { + task := jobs.SubmitTask{ + TaskKey: fmt.Sprintf("spark_%s", strings.ReplaceAll(versions[i], ".", "_")), + SparkPythonTask: &jobs.SparkPythonTask{ + PythonFile: notebookPath, + }, + NewCluster: &compute.ClusterSpec{ + SparkVersion: versions[i], + NumWorkers: 1, + NodeTypeId: nodeTypeId, + DataSecurityMode: compute.DataSecurityModeUserIsolation, + }, + } + tasks = append(tasks, task) + } + + return tasks +} + +func GenerateWheelTasks(wheelPath string, versions []string, nodeTypeId string) []jobs.SubmitTask { + tasks := make([]jobs.SubmitTask, 0) + for i := 0; i < len(versions); i++ { + task := jobs.SubmitTask{ + TaskKey: fmt.Sprintf("whl_%s", strings.ReplaceAll(versions[i], ".", "_")), + PythonWheelTask: &jobs.PythonWheelTask{ + PackageName: "my_test_code", + EntryPoint: "run", + }, + NewCluster: 
&compute.ClusterSpec{ + SparkVersion: versions[i], + NumWorkers: 1, + NodeTypeId: nodeTypeId, + DataSecurityMode: compute.DataSecurityModeUserIsolation, + }, + Libraries: []compute.Library{ + {Whl: wheelPath}, + }, + } + tasks = append(tasks, task) + } + + return tasks +} diff --git a/internal/python/testdata/my_test_code-0.0.1-py3-none-any.whl b/integration/python/testdata/my_test_code-0.0.1-py3-none-any.whl similarity index 100% rename from internal/python/testdata/my_test_code-0.0.1-py3-none-any.whl rename to integration/python/testdata/my_test_code-0.0.1-py3-none-any.whl diff --git a/internal/python/testdata/spark.py b/integration/python/testdata/spark.py similarity index 100% rename from internal/python/testdata/spark.py rename to integration/python/testdata/spark.py diff --git a/internal/python/testdata/test.py b/integration/python/testdata/test.py similarity index 100% rename from internal/python/testdata/test.py rename to integration/python/testdata/test.py diff --git a/internal/alerts_test.go b/internal/alerts_test.go deleted file mode 100644 index 6d7544074..000000000 --- a/internal/alerts_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package internal - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestAccAlertsCreateErrWhenNoArguments(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - _, _, err := RequireErrorRun(t, "alerts-legacy", "create") - assert.Equal(t, "please provide command input in JSON format by specifying the --json flag", err.Error()) -} diff --git a/internal/api_test.go b/internal/api_test.go deleted file mode 100644 index f3e8b7171..000000000 --- a/internal/api_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package internal - -import ( - "encoding/json" - "fmt" - "path" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - _ "github.com/databricks/cli/cmd/api" -) - -func TestAccApiGet(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - stdout, _ := RequireSuccessfulRun(t, "api", "get", "/api/2.0/preview/scim/v2/Me") - - // Deserialize SCIM API response. - var out map[string]any - err := json.Unmarshal(stdout.Bytes(), &out) - require.NoError(t, err) - - // Assert that the output somewhat makes sense for the SCIM API. 
- assert.Equal(t, true, out["active"]) - assert.NotNil(t, out["id"]) -} - -func TestAccApiPost(t *testing.T) { - env := GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) - if env == "gcp" { - t.Skip("DBFS REST API is disabled on gcp") - } - - dbfsPath := path.Join("/tmp/databricks/integration", RandomName("api-post")) - requestPath := writeFile(t, "body.json", fmt.Sprintf(`{ - "path": "%s" - }`, dbfsPath)) - - // Post to mkdir - { - RequireSuccessfulRun(t, "api", "post", "--json=@"+requestPath, "/api/2.0/dbfs/mkdirs") - } - - // Post to delete - { - RequireSuccessfulRun(t, "api", "post", "--json=@"+requestPath, "/api/2.0/dbfs/delete") - } -} diff --git a/internal/bugbash/exec.sh b/internal/bugbash/exec.sh index ac25b16ed..4a087dc66 100755 --- a/internal/bugbash/exec.sh +++ b/internal/bugbash/exec.sh @@ -31,7 +31,7 @@ function cli_snapshot_directory() { dir="${dir}_386" ;; arm64|aarch64) - dir="${dir}_arm64" + dir="${dir}_arm64_v8.0" ;; armv7l|armv8l) dir="${dir}_arm_6" diff --git a/internal/build/variables.go b/internal/build/variables.go index 197dee9c3..80c4683ab 100644 --- a/internal/build/variables.go +++ b/internal/build/variables.go @@ -1,21 +1,27 @@ package build -var buildProjectName string = "cli" -var buildVersion string = "" +var ( + buildProjectName string = "cli" + buildVersion string = "" +) -var buildBranch string = "undefined" -var buildTag string = "undefined" -var buildShortCommit string = "00000000" -var buildFullCommit string = "0000000000000000000000000000000000000000" -var buildCommitTimestamp string = "0" -var buildSummary string = "v0.0.0" +var ( + buildBranch string = "undefined" + buildTag string = "undefined" + buildShortCommit string = "00000000" + buildFullCommit string = "0000000000000000000000000000000000000000" + buildCommitTimestamp string = "0" + buildSummary string = "v0.0.0" +) -var buildMajor string = "0" -var buildMinor string = "0" -var buildPatch string = "0" -var buildPrerelease string = "" -var buildIsSnapshot string = "false" -var buildTimestamp string = "0" +var ( + buildMajor string = "0" + buildMinor string = "0" + buildPatch string = "0" + buildPrerelease string = "" + buildIsSnapshot string = "false" + buildTimestamp string = "0" +) // This function is used to set the build version for testing purposes. 
func SetBuildVersion(version string) { diff --git a/internal/bundle/basic_test.go b/internal/bundle/basic_test.go deleted file mode 100644 index c24ef0c05..000000000 --- a/internal/bundle/basic_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package bundle - -import ( - "os" - "path/filepath" - "testing" - - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" - "github.com/databricks/cli/libs/env" - "github.com/google/uuid" - "github.com/stretchr/testify/require" -) - -func TestAccBasicBundleDeployWithFailOnActiveRuns(t *testing.T) { - ctx, _ := acc.WorkspaceTest(t) - - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) - uniqueId := uuid.New().String() - root, err := initTestTemplate(t, ctx, "basic", map[string]any{ - "unique_id": uniqueId, - "node_type_id": nodeTypeId, - "spark_version": defaultSparkVersion, - }) - require.NoError(t, err) - - t.Cleanup(func() { - err = destroyBundle(t, ctx, root) - require.NoError(t, err) - }) - - // deploy empty bundle - err = deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"}) - require.NoError(t, err) - - // Remove .databricks directory to simulate a fresh deployment - err = os.RemoveAll(filepath.Join(root, ".databricks")) - require.NoError(t, err) - - // deploy empty bundle again - err = deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"}) - require.NoError(t, err) -} diff --git a/internal/bundle/deploy_to_shared_test.go b/internal/bundle/deploy_to_shared_test.go deleted file mode 100644 index 568c1fb56..000000000 --- a/internal/bundle/deploy_to_shared_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package bundle - -import ( - "fmt" - "testing" - - "github.com/databricks/cli/internal" - "github.com/databricks/cli/internal/acc" - "github.com/databricks/cli/libs/env" - "github.com/google/uuid" - "github.com/stretchr/testify/require" -) - -func TestAccDeployBasicToSharedWorkspacePath(t *testing.T) { - ctx, wt := acc.WorkspaceTest(t) - - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) - uniqueId := uuid.New().String() - - currentUser, err := wt.W.CurrentUser.Me(ctx) - require.NoError(t, err) - - bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{ - "unique_id": uniqueId, - "node_type_id": nodeTypeId, - "spark_version": defaultSparkVersion, - "root_path": fmt.Sprintf("/Shared/%s", currentUser.UserName), - }) - require.NoError(t, err) - - t.Cleanup(func() { - err = destroyBundle(wt.T, ctx, bundleRoot) - require.NoError(wt.T, err) - }) - - err = deployBundle(wt.T, ctx, bundleRoot) - require.NoError(wt.T, err) -} diff --git a/internal/clusters_test.go b/internal/clusters_test.go deleted file mode 100644 index 6daddcce3..000000000 --- a/internal/clusters_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package internal - -import ( - "fmt" - "regexp" - "testing" - - "github.com/stretchr/testify/assert" -) - -var clusterId string - -func TestAccClustersList(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - stdout, stderr := RequireSuccessfulRun(t, "clusters", "list") - outStr := stdout.String() - assert.Contains(t, outStr, "ID") - assert.Contains(t, outStr, "Name") - assert.Contains(t, outStr, "State") - assert.Equal(t, "", stderr.String()) - - idRegExp := regexp.MustCompile(`[0-9]{4}\-[0-9]{6}-[a-z0-9]{8}`) - clusterId = idRegExp.FindString(outStr) - assert.NotEmpty(t, clusterId) -} - -func TestAccClustersGet(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - stdout, stderr := RequireSuccessfulRun(t, "clusters", "get", clusterId) - outStr := 
stdout.String() - assert.Contains(t, outStr, fmt.Sprintf(`"cluster_id":"%s"`, clusterId)) - assert.Equal(t, "", stderr.String()) -} - -func TestClusterCreateErrorWhenNoArguments(t *testing.T) { - _, _, err := RequireErrorRun(t, "clusters", "create") - assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") -} diff --git a/internal/helpers.go b/internal/helpers.go deleted file mode 100644 index 596f45537..000000000 --- a/internal/helpers.go +++ /dev/null @@ -1,623 +0,0 @@ -package internal - -import ( - "bufio" - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "math/rand" - "net/http" - "os" - "path" - "path/filepath" - "reflect" - "strings" - "sync" - "testing" - "time" - - "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/internal/acc" - "github.com/databricks/cli/libs/flags" - - "github.com/databricks/cli/cmd" - _ "github.com/databricks/cli/cmd/version" - "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go" - "github.com/databricks/databricks-sdk-go/apierr" - "github.com/databricks/databricks-sdk-go/service/catalog" - "github.com/databricks/databricks-sdk-go/service/compute" - "github.com/databricks/databricks-sdk-go/service/files" - "github.com/databricks/databricks-sdk-go/service/jobs" - "github.com/databricks/databricks-sdk-go/service/workspace" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "github.com/stretchr/testify/require" - - _ "github.com/databricks/cli/cmd/workspace" -) - -const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" - -// GetEnvOrSkipTest proceeds with test only with that env variable -func GetEnvOrSkipTest(t *testing.T, name string) string { - value := os.Getenv(name) - if value == "" { - t.Skipf("Environment variable %s is missing", name) - } - return value -} - -// RandomName gives random name with optional prefix. e.g. qa.RandomName("tf-") -func RandomName(prefix ...string) string { - randLen := 12 - b := make([]byte, randLen) - for i := range b { - b[i] = charset[rand.Intn(randLen)] - } - if len(prefix) > 0 { - return fmt.Sprintf("%s%s", strings.Join(prefix, ""), b) - } - return string(b) -} - -// Helper for running the root command in the background. -// It ensures that the background goroutine terminates upon -// test completion through cancelling the command context. -type cobraTestRunner struct { - *testing.T - - args []string - stdout bytes.Buffer - stderr bytes.Buffer - stdinR *io.PipeReader - stdinW *io.PipeWriter - - ctx context.Context - - // Line-by-line output. - // Background goroutines populate these channels by reading from stdout/stderr pipes. - stdoutLines <-chan string - stderrLines <-chan string - - errch <-chan error -} - -func consumeLines(ctx context.Context, wg *sync.WaitGroup, r io.Reader) <-chan string { - ch := make(chan string, 30000) - wg.Add(1) - go func() { - defer close(ch) - defer wg.Done() - scanner := bufio.NewScanner(r) - for scanner.Scan() { - // We expect to be able to always send these lines into the channel. - // If we can't, it means the channel is full and likely there is a problem - // in either the test or the code under test. - select { - case <-ctx.Done(): - return - case ch <- scanner.Text(): - continue - default: - panic("line buffer is full") - } - } - }() - return ch -} - -func (t *cobraTestRunner) registerFlagCleanup(c *cobra.Command) { - // Find target command that will be run. 
Example: if the command run is `databricks fs cp`, - // target command corresponds to `cp` - targetCmd, _, err := c.Find(t.args) - if err != nil && strings.HasPrefix(err.Error(), "unknown command") { - // even if command is unknown, we can proceed - require.NotNil(t, targetCmd) - } else { - require.NoError(t, err) - } - - // Force initialization of default flags. - // These are initialized by cobra at execution time and would otherwise - // not be cleaned up by the cleanup function below. - targetCmd.InitDefaultHelpFlag() - targetCmd.InitDefaultVersionFlag() - - // Restore flag values to their original value on test completion. - targetCmd.Flags().VisitAll(func(f *pflag.Flag) { - v := reflect.ValueOf(f.Value) - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - // Store copy of the current flag value. - reset := reflect.New(v.Type()).Elem() - reset.Set(v) - t.Cleanup(func() { - v.Set(reset) - }) - }) -} - -// Like [cobraTestRunner.Eventually], but more specific -func (t *cobraTestRunner) WaitForTextPrinted(text string, timeout time.Duration) { - t.Eventually(func() bool { - currentStdout := t.stdout.String() - return strings.Contains(currentStdout, text) - }, timeout, 50*time.Millisecond) -} - -func (t *cobraTestRunner) WaitForOutput(text string, timeout time.Duration) { - require.Eventually(t.T, func() bool { - currentStdout := t.stdout.String() - currentErrout := t.stderr.String() - return strings.Contains(currentStdout, text) || strings.Contains(currentErrout, text) - }, timeout, 50*time.Millisecond) -} - -func (t *cobraTestRunner) WithStdin() { - reader, writer := io.Pipe() - t.stdinR = reader - t.stdinW = writer -} - -func (t *cobraTestRunner) CloseStdin() { - if t.stdinW == nil { - panic("no standard input configured") - } - t.stdinW.Close() -} - -func (t *cobraTestRunner) SendText(text string) { - if t.stdinW == nil { - panic("no standard input configured") - } - t.stdinW.Write([]byte(text + "\n")) -} - -func (t *cobraTestRunner) RunBackground() { - var stdoutR, stderrR io.Reader - var stdoutW, stderrW io.WriteCloser - stdoutR, stdoutW = io.Pipe() - stderrR, stderrW = io.Pipe() - ctx := cmdio.NewContext(t.ctx, &cmdio.Logger{ - Mode: flags.ModeAppend, - Reader: bufio.Reader{}, - Writer: stderrW, - }) - - cli := cmd.New(ctx) - cli.SetOut(stdoutW) - cli.SetErr(stderrW) - cli.SetArgs(t.args) - if t.stdinW != nil { - cli.SetIn(t.stdinR) - } - - // Register cleanup function to restore flags to their original values - // once test has been executed. This is needed because flag values reside - // in a global singleton data-structure, and thus subsequent tests might - // otherwise interfere with each other - t.registerFlagCleanup(cli) - - errch := make(chan error) - ctx, cancel := context.WithCancel(ctx) - - // Tee stdout/stderr to buffers. - stdoutR = io.TeeReader(stdoutR, &t.stdout) - stderrR = io.TeeReader(stderrR, &t.stderr) - - // Consume stdout/stderr line-by-line. - var wg sync.WaitGroup - t.stdoutLines = consumeLines(ctx, &wg, stdoutR) - t.stderrLines = consumeLines(ctx, &wg, stderrR) - - // Run command in background. - go func() { - err := root.Execute(ctx, cli) - if err != nil { - t.Logf("Error running command: %s", err) - } - - // Close pipes to signal EOF. - stdoutW.Close() - stderrW.Close() - - // Wait for the [consumeLines] routines to finish now that - // the pipes they're reading from have closed. - wg.Wait() - - if t.stdout.Len() > 0 { - // Make a copy of the buffer such that it remains "unread". 
- scanner := bufio.NewScanner(bytes.NewBuffer(t.stdout.Bytes())) - for scanner.Scan() { - t.Logf("[databricks stdout]: %s", scanner.Text()) - } - } - - if t.stderr.Len() > 0 { - // Make a copy of the buffer such that it remains "unread". - scanner := bufio.NewScanner(bytes.NewBuffer(t.stderr.Bytes())) - for scanner.Scan() { - t.Logf("[databricks stderr]: %s", scanner.Text()) - } - } - - // Reset context on command for the next test. - // These commands are globals so we have to clean up to the best of our ability after each run. - // See https://github.com/spf13/cobra/blob/a6f198b635c4b18fff81930c40d464904e55b161/command.go#L1062-L1066 - //nolint:staticcheck // cobra sets the context and doesn't clear it - cli.SetContext(nil) - - // Make caller aware of error. - errch <- err - close(errch) - }() - - // Ensure command terminates upon test completion (success or failure). - t.Cleanup(func() { - // Signal termination of command. - cancel() - // Wait for goroutine to finish. - <-errch - }) - - t.errch = errch -} - -func (t *cobraTestRunner) Run() (bytes.Buffer, bytes.Buffer, error) { - t.RunBackground() - err := <-t.errch - return t.stdout, t.stderr, err -} - -// Like [require.Eventually] but errors if the underlying command has failed. -func (c *cobraTestRunner) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...any) { - ch := make(chan bool, 1) - - timer := time.NewTimer(waitFor) - defer timer.Stop() - - ticker := time.NewTicker(tick) - defer ticker.Stop() - - // Kick off condition check immediately. - go func() { ch <- condition() }() - - for tick := ticker.C; ; { - select { - case err := <-c.errch: - require.Fail(c, "Command failed", err) - return - case <-timer.C: - require.Fail(c, "Condition never satisfied", msgAndArgs...) - return - case <-tick: - tick = nil - go func() { ch <- condition() }() - case v := <-ch: - if v { - return - } - tick = ticker.C - } - } -} - -func (t *cobraTestRunner) RunAndExpectOutput(heredoc string) { - stdout, _, err := t.Run() - require.NoError(t, err) - require.Equal(t, cmdio.Heredoc(heredoc), strings.TrimSpace(stdout.String())) -} - -func (t *cobraTestRunner) RunAndParseJSON(v any) { - stdout, _, err := t.Run() - require.NoError(t, err) - err = json.Unmarshal(stdout.Bytes(), &v) - require.NoError(t, err) -} - -func NewCobraTestRunner(t *testing.T, args ...string) *cobraTestRunner { - return &cobraTestRunner{ - T: t, - ctx: context.Background(), - args: args, - } -} - -func NewCobraTestRunnerWithContext(t *testing.T, ctx context.Context, args ...string) *cobraTestRunner { - return &cobraTestRunner{ - T: t, - ctx: ctx, - args: args, - } -} - -func RequireSuccessfulRun(t *testing.T, args ...string) (bytes.Buffer, bytes.Buffer) { - t.Logf("run args: [%s]", strings.Join(args, ", ")) - c := NewCobraTestRunner(t, args...) - stdout, stderr, err := c.Run() - require.NoError(t, err) - return stdout, stderr -} - -func RequireErrorRun(t *testing.T, args ...string) (bytes.Buffer, bytes.Buffer, error) { - c := NewCobraTestRunner(t, args...) 
- stdout, stderr, err := c.Run() - require.Error(t, err) - return stdout, stderr, err -} - -func readFile(t *testing.T, name string) string { - b, err := os.ReadFile(name) - require.NoError(t, err) - - return string(b) -} - -func writeFile(t *testing.T, name string, body string) string { - f, err := os.Create(filepath.Join(t.TempDir(), name)) - require.NoError(t, err) - _, err = f.WriteString(body) - require.NoError(t, err) - f.Close() - return f.Name() -} - -func GenerateNotebookTasks(notebookPath string, versions []string, nodeTypeId string) []jobs.SubmitTask { - tasks := make([]jobs.SubmitTask, 0) - for i := 0; i < len(versions); i++ { - task := jobs.SubmitTask{ - TaskKey: fmt.Sprintf("notebook_%s", strings.ReplaceAll(versions[i], ".", "_")), - NotebookTask: &jobs.NotebookTask{ - NotebookPath: notebookPath, - }, - NewCluster: &compute.ClusterSpec{ - SparkVersion: versions[i], - NumWorkers: 1, - NodeTypeId: nodeTypeId, - DataSecurityMode: compute.DataSecurityModeUserIsolation, - }, - } - tasks = append(tasks, task) - } - - return tasks -} - -func GenerateSparkPythonTasks(notebookPath string, versions []string, nodeTypeId string) []jobs.SubmitTask { - tasks := make([]jobs.SubmitTask, 0) - for i := 0; i < len(versions); i++ { - task := jobs.SubmitTask{ - TaskKey: fmt.Sprintf("spark_%s", strings.ReplaceAll(versions[i], ".", "_")), - SparkPythonTask: &jobs.SparkPythonTask{ - PythonFile: notebookPath, - }, - NewCluster: &compute.ClusterSpec{ - SparkVersion: versions[i], - NumWorkers: 1, - NodeTypeId: nodeTypeId, - DataSecurityMode: compute.DataSecurityModeUserIsolation, - }, - } - tasks = append(tasks, task) - } - - return tasks -} - -func GenerateWheelTasks(wheelPath string, versions []string, nodeTypeId string) []jobs.SubmitTask { - tasks := make([]jobs.SubmitTask, 0) - for i := 0; i < len(versions); i++ { - task := jobs.SubmitTask{ - TaskKey: fmt.Sprintf("whl_%s", strings.ReplaceAll(versions[i], ".", "_")), - PythonWheelTask: &jobs.PythonWheelTask{ - PackageName: "my_test_code", - EntryPoint: "run", - }, - NewCluster: &compute.ClusterSpec{ - SparkVersion: versions[i], - NumWorkers: 1, - NodeTypeId: nodeTypeId, - DataSecurityMode: compute.DataSecurityModeUserIsolation, - }, - Libraries: []compute.Library{ - {Whl: wheelPath}, - }, - } - tasks = append(tasks, task) - } - - return tasks -} - -func TemporaryWorkspaceDir(t *testing.T, w *databricks.WorkspaceClient) string { - ctx := context.Background() - me, err := w.CurrentUser.Me(ctx) - require.NoError(t, err) - - basePath := fmt.Sprintf("/Users/%s/%s", me.UserName, RandomName("integration-test-wsfs-")) - - t.Logf("Creating %s", basePath) - err = w.Workspace.MkdirsByPath(ctx, basePath) - require.NoError(t, err) - - // Remove test directory on test completion. 
- t.Cleanup(func() { - t.Logf("Removing %s", basePath) - err := w.Workspace.Delete(ctx, workspace.Delete{ - Path: basePath, - Recursive: true, - }) - if err == nil || apierr.IsMissing(err) { - return - } - t.Logf("Unable to remove temporary workspace directory %s: %#v", basePath, err) - }) - - return basePath -} - -func TemporaryDbfsDir(t *testing.T, w *databricks.WorkspaceClient) string { - ctx := context.Background() - path := fmt.Sprintf("/tmp/%s", RandomName("integration-test-dbfs-")) - - t.Logf("Creating DBFS folder:%s", path) - err := w.Dbfs.MkdirsByPath(ctx, path) - require.NoError(t, err) - - t.Cleanup(func() { - t.Logf("Removing DBFS folder:%s", path) - err := w.Dbfs.Delete(ctx, files.Delete{ - Path: path, - Recursive: true, - }) - if err == nil || apierr.IsMissing(err) { - return - } - t.Logf("unable to remove temporary dbfs directory %s: %#v", path, err) - }) - - return path -} - -// Create a new UC volume in a catalog called "main" in the workspace. -func TemporaryUcVolume(t *testing.T, w *databricks.WorkspaceClient) string { - ctx := context.Background() - - // Create a schema - schema, err := w.Schemas.Create(ctx, catalog.CreateSchema{ - CatalogName: "main", - Name: RandomName("test-schema-"), - }) - require.NoError(t, err) - t.Cleanup(func() { - w.Schemas.Delete(ctx, catalog.DeleteSchemaRequest{ - FullName: schema.FullName, - }) - }) - - // Create a volume - volume, err := w.Volumes.Create(ctx, catalog.CreateVolumeRequestContent{ - CatalogName: "main", - SchemaName: schema.Name, - Name: "my-volume", - VolumeType: catalog.VolumeTypeManaged, - }) - require.NoError(t, err) - t.Cleanup(func() { - w.Volumes.Delete(ctx, catalog.DeleteVolumeRequest{ - Name: volume.FullName, - }) - }) - - return path.Join("/Volumes", "main", schema.Name, volume.Name) - -} - -func TemporaryRepo(t *testing.T, w *databricks.WorkspaceClient) string { - ctx := context.Background() - me, err := w.CurrentUser.Me(ctx) - require.NoError(t, err) - - repoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, RandomName("integration-test-repo-")) - - t.Logf("Creating repo:%s", repoPath) - repoInfo, err := w.Repos.Create(ctx, workspace.CreateRepoRequest{ - Url: "https://github.com/databricks/cli", - Provider: "github", - Path: repoPath, - }) - require.NoError(t, err) - - t.Cleanup(func() { - t.Logf("Removing repo: %s", repoPath) - err := w.Repos.Delete(ctx, workspace.DeleteRepoRequest{ - RepoId: repoInfo.Id, - }) - if err == nil || apierr.IsMissing(err) { - return - } - t.Logf("unable to remove repo %s: %#v", repoPath, err) - }) - - return repoPath -} - -func GetNodeTypeId(env string) string { - if env == "gcp" { - return "n1-standard-4" - } else if env == "aws" || env == "ucws" { - // aws-prod-ucws has CLOUD_ENV set to "ucws" - return "i3.xlarge" - } - return "Standard_DS4_v2" -} - -func setupLocalFiler(t *testing.T) (filer.Filer, string) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - tmp := t.TempDir() - f, err := filer.NewLocalClient(tmp) - require.NoError(t, err) - - return f, path.Join(filepath.ToSlash(tmp)) -} - -func setupWsfsFiler(t *testing.T) (filer.Filer, string) { - ctx, wt := acc.WorkspaceTest(t) - - tmpdir := TemporaryWorkspaceDir(t, wt.W) - f, err := filer.NewWorkspaceFilesClient(wt.W, tmpdir) - require.NoError(t, err) - - // Check if we can use this API here, skip test if we cannot. 
- _, err = f.Read(ctx, "we_use_this_call_to_test_if_this_api_is_enabled") - var aerr *apierr.APIError - if errors.As(err, &aerr) && aerr.StatusCode == http.StatusBadRequest { - t.Skip(aerr.Message) - } - - return f, tmpdir -} - -func setupWsfsExtensionsFiler(t *testing.T) (filer.Filer, string) { - _, wt := acc.WorkspaceTest(t) - - tmpdir := TemporaryWorkspaceDir(t, wt.W) - f, err := filer.NewWorkspaceFilesExtensionsClient(wt.W, tmpdir) - require.NoError(t, err) - - return f, tmpdir -} - -func setupDbfsFiler(t *testing.T) (filer.Filer, string) { - _, wt := acc.WorkspaceTest(t) - - tmpDir := TemporaryDbfsDir(t, wt.W) - f, err := filer.NewDbfsClient(wt.W, tmpDir) - require.NoError(t, err) - - return f, path.Join("dbfs:/", tmpDir) -} - -func setupUcVolumesFiler(t *testing.T) (filer.Filer, string) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - if os.Getenv("TEST_METASTORE_ID") == "" { - t.Skip("Skipping tests that require a UC Volume when metastore id is not set.") - } - - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - tmpDir := TemporaryUcVolume(t, w) - f, err := filer.NewFilesClient(w, tmpDir) - require.NoError(t, err) - - return f, path.Join("dbfs:/", tmpDir) -} diff --git a/internal/jobs_test.go b/internal/jobs_test.go deleted file mode 100644 index 8513168c8..000000000 --- a/internal/jobs_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package internal - -import ( - "encoding/json" - "fmt" - "testing" - - "github.com/databricks/cli/internal/acc" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestAccCreateJob(t *testing.T) { - acc.WorkspaceTest(t) - env := GetEnvOrSkipTest(t, "CLOUD_ENV") - if env != "azure" { - t.Skipf("Not running test on cloud %s", env) - } - stdout, stderr := RequireSuccessfulRun(t, "jobs", "create", "--json", "@testjsons/create_job_without_workers.json", "--log-level=debug") - assert.Empty(t, stderr.String()) - var output map[string]int - err := json.Unmarshal(stdout.Bytes(), &output) - require.NoError(t, err) - RequireSuccessfulRun(t, "jobs", "delete", fmt.Sprint(output["job_id"]), "--log-level=debug") -} diff --git a/internal/storage_credentials_test.go b/internal/storage_credentials_test.go deleted file mode 100644 index 07c21861f..000000000 --- a/internal/storage_credentials_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package internal - -import ( - "testing" - - "github.com/databricks/cli/internal/acc" - "github.com/stretchr/testify/assert" -) - -func TestAccStorageCredentialsListRendersResponse(t *testing.T) { - _, _ = acc.WorkspaceTest(t) - - // Check if metastore is assigned for the workspace, otherwise test will fail - t.Log(GetEnvOrSkipTest(t, "TEST_METASTORE_ID")) - - stdout, stderr := RequireSuccessfulRun(t, "storage-credentials", "list") - assert.NotEmpty(t, stdout) - assert.Empty(t, stderr) -} diff --git a/internal/testcli/README.md b/internal/testcli/README.md new file mode 100644 index 000000000..b37ae3bc9 --- /dev/null +++ b/internal/testcli/README.md @@ -0,0 +1,7 @@ +# testcli + +This package provides a way to run the CLI from tests as if it were a separate process. +By running the CLI inline we can still set breakpoints and step through execution. + +It transitively imports pretty much this entire repository, which is why we +intentionally keep this package _separate_ from `testutil`. 
diff --git a/internal/testcli/golden.go b/internal/testcli/golden.go new file mode 100644 index 000000000..34f38f18a --- /dev/null +++ b/internal/testcli/golden.go @@ -0,0 +1,224 @@ +package testcli + +import ( + "context" + "fmt" + "os" + "regexp" + "slices" + "strings" + "testing" + + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/iamutil" + "github.com/databricks/cli/libs/testdiff" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/stretchr/testify/assert" +) + +var OverwriteMode = os.Getenv("TESTS_OUTPUT") == "OVERWRITE" + +func ReadFile(t testutil.TestingT, ctx context.Context, filename string) string { + data, err := os.ReadFile(filename) + if os.IsNotExist(err) { + return "" + } + assert.NoError(t, err) + // On CI, on Windows \n in the file somehow end up as \r\n + return NormalizeNewlines(string(data)) +} + +func captureOutput(t testutil.TestingT, ctx context.Context, args []string) string { + t.Logf("run args: [%s]", strings.Join(args, ", ")) + r := NewRunner(t, ctx, args...) + stdout, stderr, err := r.Run() + assert.NoError(t, err) + out := stderr.String() + stdout.String() + return ReplaceOutput(t, ctx, out) +} + +func WriteFile(t testutil.TestingT, filename, data string) { + t.Logf("Overwriting %s", filename) + err := os.WriteFile(filename, []byte(data), 0o644) + assert.NoError(t, err) +} + +func AssertOutput(t testutil.TestingT, ctx context.Context, args []string, expectedPath string) { + expected := ReadFile(t, ctx, expectedPath) + + out := captureOutput(t, ctx, args) + + if out != expected { + actual := fmt.Sprintf("Output from %v", args) + testdiff.AssertEqualTexts(t, expectedPath, actual, expected, out) + + if OverwriteMode { + WriteFile(t, expectedPath, out) + } + } +} + +func AssertOutputJQ(t testutil.TestingT, ctx context.Context, args []string, expectedPath string, ignorePaths []string) { + expected := ReadFile(t, ctx, expectedPath) + + out := captureOutput(t, ctx, args) + + if out != expected { + actual := fmt.Sprintf("Output from %v", args) + testdiff.AssertEqualJQ(t.(*testing.T), expectedPath, actual, expected, out, ignorePaths) + + if OverwriteMode { + WriteFile(t, expectedPath, out) + } + } +} + +var ( + uuidRegex = regexp.MustCompile(`[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}`) + numIdRegex = regexp.MustCompile(`[0-9]{3,}`) + privatePathRegex = regexp.MustCompile(`(/tmp|/private)(/.*)/([a-zA-Z0-9]+)`) +) + +func ReplaceOutput(t testutil.TestingT, ctx context.Context, out string) string { + out = NormalizeNewlines(out) + replacements := GetReplacementsMap(ctx) + if replacements == nil { + t.Fatal("WithReplacementsMap was not called") + } + out = replacements.Replace(out) + out = uuidRegex.ReplaceAllString(out, "") + out = numIdRegex.ReplaceAllString(out, "") + out = privatePathRegex.ReplaceAllString(out, "/tmp/.../$3") + + return out +} + +type key int + +const ( + replacementsMapKey = key(1) +) + +type Replacement struct { + Old string + New string +} + +type ReplacementsContext struct { + Repls []Replacement +} + +func (r *ReplacementsContext) Replace(s string) string { + // QQQ Should probably only replace whole words + for _, repl := range r.Repls { + s = strings.ReplaceAll(s, repl.Old, repl.New) + } + return s +} + +func (r *ReplacementsContext) Set(old, new string) { + if old == "" || new == "" { + return + } + r.Repls = append(r.Repls, Replacement{Old: old, New: new}) +} + +func WithReplacementsMap(ctx context.Context) 
(context.Context, *ReplacementsContext) { + value := ctx.Value(replacementsMapKey) + if value != nil { + if existingMap, ok := value.(*ReplacementsContext); ok { + return ctx, existingMap + } + } + + newMap := &ReplacementsContext{} + ctx = context.WithValue(ctx, replacementsMapKey, newMap) + return ctx, newMap +} + +func GetReplacementsMap(ctx context.Context) *ReplacementsContext { + value := ctx.Value(replacementsMapKey) + if value != nil { + if existingMap, ok := value.(*ReplacementsContext); ok { + return existingMap + } + } + return nil +} + +func PrepareReplacements(t testutil.TestingT, r *ReplacementsContext, w *databricks.WorkspaceClient) { + // in some clouds (gcp) w.Config.Host includes "https://" prefix in others it's really just a host (azure) + host := strings.TrimPrefix(strings.TrimPrefix(w.Config.Host, "http://"), "https://") + r.Set(host, "$DATABRICKS_HOST") + r.Set(w.Config.ClusterID, "$DATABRICKS_CLUSTER_ID") + r.Set(w.Config.WarehouseID, "$DATABRICKS_WAREHOUSE_ID") + r.Set(w.Config.ServerlessComputeID, "$DATABRICKS_SERVERLESS_COMPUTE_ID") + r.Set(w.Config.MetadataServiceURL, "$DATABRICKS_METADATA_SERVICE_URL") + r.Set(w.Config.AccountID, "$DATABRICKS_ACCOUNT_ID") + r.Set(w.Config.Token, "$DATABRICKS_TOKEN") + r.Set(w.Config.Username, "$DATABRICKS_USERNAME") + r.Set(w.Config.Password, "$DATABRICKS_PASSWORD") + r.Set(w.Config.Profile, "$DATABRICKS_CONFIG_PROFILE") + r.Set(w.Config.ConfigFile, "$DATABRICKS_CONFIG_FILE") + r.Set(w.Config.GoogleServiceAccount, "$DATABRICKS_GOOGLE_SERVICE_ACCOUNT") + r.Set(w.Config.GoogleCredentials, "$GOOGLE_CREDENTIALS") + r.Set(w.Config.AzureResourceID, "$DATABRICKS_AZURE_RESOURCE_ID") + r.Set(w.Config.AzureClientSecret, "$ARM_CLIENT_SECRET") + // r.Set(w.Config.AzureClientID, "$ARM_CLIENT_ID") + r.Set(w.Config.AzureClientID, "$USERNAME") + r.Set(w.Config.AzureTenantID, "$ARM_TENANT_ID") + r.Set(w.Config.ActionsIDTokenRequestURL, "$ACTIONS_ID_TOKEN_REQUEST_URL") + r.Set(w.Config.ActionsIDTokenRequestToken, "$ACTIONS_ID_TOKEN_REQUEST_TOKEN") + r.Set(w.Config.AzureEnvironment, "$ARM_ENVIRONMENT") + r.Set(w.Config.ClientID, "$DATABRICKS_CLIENT_ID") + r.Set(w.Config.ClientSecret, "$DATABRICKS_CLIENT_SECRET") + r.Set(w.Config.DatabricksCliPath, "$DATABRICKS_CLI_PATH") + // This is set to words like "path" that happen too frequently + // r.Set(w.Config.AuthType, "$DATABRICKS_AUTH_TYPE") +} + +func PrepareReplacementsUser(t testutil.TestingT, r *ReplacementsContext, u iam.User) { + // There could be exact matches or overlap between different name fields, so sort them by length + // to ensure we match the largest one first and map them all to the same token + names := []string{ + u.DisplayName, + u.UserName, + iamutil.GetShortUserName(&u), + u.Name.FamilyName, + u.Name.GivenName, + } + if u.Name != nil { + names = append(names, u.Name.FamilyName) + names = append(names, u.Name.GivenName) + } + for _, val := range u.Emails { + names = append(names, val.Value) + } + stableSortReverseLength(names) + + for _, name := range names { + r.Set(name, "$USERNAME") + } + + for ind, val := range u.Groups { + r.Set(val.Value, fmt.Sprintf("$USER.Groups[%d]", ind)) + } + + r.Set(u.Id, "$USER.Id") + + for ind, val := range u.Roles { + r.Set(val.Value, fmt.Sprintf("$USER.Roles[%d]", ind)) + } +} + +func stableSortReverseLength(strs []string) { + slices.SortStableFunc(strs, func(a, b string) int { + return len(b) - len(a) + }) +} + +func NormalizeNewlines(input string) string { + output := strings.ReplaceAll(input, "\r\n", "\n") + return strings.ReplaceAll(output, 
"\r", "\n") +} diff --git a/internal/testcli/golden_test.go b/internal/testcli/golden_test.go new file mode 100644 index 000000000..215bf33d3 --- /dev/null +++ b/internal/testcli/golden_test.go @@ -0,0 +1,13 @@ +package testcli + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSort(t *testing.T) { + input := []string{"a", "bc", "cd"} + stableSortReverseLength(input) + assert.Equal(t, []string{"bc", "cd", "a"}, input) +} diff --git a/internal/testcli/runner.go b/internal/testcli/runner.go new file mode 100644 index 000000000..95073b57c --- /dev/null +++ b/internal/testcli/runner.go @@ -0,0 +1,306 @@ +package testcli + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "io" + "reflect" + "strings" + "sync" + "time" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "github.com/stretchr/testify/require" + + "github.com/databricks/cli/cmd" + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" +) + +// Helper for running the root command in the background. +// It ensures that the background goroutine terminates upon +// test completion through cancelling the command context. +type Runner struct { + testutil.TestingT + + args []string + stdout bytes.Buffer + stderr bytes.Buffer + stdinR *io.PipeReader + stdinW *io.PipeWriter + + ctx context.Context + + // Line-by-line output. + // Background goroutines populate these channels by reading from stdout/stderr pipes. + StdoutLines <-chan string + StderrLines <-chan string + + errch <-chan error +} + +func consumeLines(ctx context.Context, wg *sync.WaitGroup, r io.Reader) <-chan string { + ch := make(chan string, 30000) + wg.Add(1) + go func() { + defer close(ch) + defer wg.Done() + scanner := bufio.NewScanner(r) + for scanner.Scan() { + // We expect to be able to always send these lines into the channel. + // If we can't, it means the channel is full and likely there is a problem + // in either the test or the code under test. + select { + case <-ctx.Done(): + return + case ch <- scanner.Text(): + continue + default: + panic("line buffer is full") + } + } + }() + return ch +} + +func (r *Runner) registerFlagCleanup(c *cobra.Command) { + // Find target command that will be run. Example: if the command run is `databricks fs cp`, + // target command corresponds to `cp` + targetCmd, _, err := c.Find(r.args) + if err != nil && strings.HasPrefix(err.Error(), "unknown command") { + // even if command is unknown, we can proceed + require.NotNil(r, targetCmd) + } else { + require.NoError(r, err) + } + + // Force initialization of default flags. + // These are initialized by cobra at execution time and would otherwise + // not be cleaned up by the cleanup function below. + targetCmd.InitDefaultHelpFlag() + targetCmd.InitDefaultVersionFlag() + + // Restore flag values to their original value on test completion. + targetCmd.Flags().VisitAll(func(f *pflag.Flag) { + v := reflect.ValueOf(f.Value) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + // Store copy of the current flag value. 
+ reset := reflect.New(v.Type()).Elem() + reset.Set(v) + r.Cleanup(func() { + v.Set(reset) + }) + }) +} + +// Like [Runner.Eventually], but more specific +func (r *Runner) WaitForTextPrinted(text string, timeout time.Duration) { + r.Eventually(func() bool { + currentStdout := r.stdout.String() + return strings.Contains(currentStdout, text) + }, timeout, 50*time.Millisecond) +} + +func (r *Runner) WaitForOutput(text string, timeout time.Duration) { + require.Eventually(r, func() bool { + currentStdout := r.stdout.String() + currentErrout := r.stderr.String() + return strings.Contains(currentStdout, text) || strings.Contains(currentErrout, text) + }, timeout, 50*time.Millisecond) +} + +func (r *Runner) WithStdin() { + reader, writer := io.Pipe() + r.stdinR = reader + r.stdinW = writer +} + +func (r *Runner) CloseStdin() { + if r.stdinW == nil { + panic("no standard input configured") + } + r.stdinW.Close() +} + +func (r *Runner) SendText(text string) { + if r.stdinW == nil { + panic("no standard input configured") + } + _, err := r.stdinW.Write([]byte(text + "\n")) + if err != nil { + panic("Failed to to write to t.stdinW") + } +} + +func (r *Runner) RunBackground() { + var stdoutR, stderrR io.Reader + var stdoutW, stderrW io.WriteCloser + stdoutR, stdoutW = io.Pipe() + stderrR, stderrW = io.Pipe() + ctx := cmdio.NewContext(r.ctx, &cmdio.Logger{ + Mode: flags.ModeAppend, + Reader: bufio.Reader{}, + Writer: stderrW, + }) + + cli := cmd.New(ctx) + cli.SetOut(stdoutW) + cli.SetErr(stderrW) + cli.SetArgs(r.args) + if r.stdinW != nil { + cli.SetIn(r.stdinR) + } + + // Register cleanup function to restore flags to their original values + // once test has been executed. This is needed because flag values reside + // in a global singleton data-structure, and thus subsequent tests might + // otherwise interfere with each other + r.registerFlagCleanup(cli) + + errch := make(chan error) + ctx, cancel := context.WithCancel(ctx) + + // Tee stdout/stderr to buffers. + stdoutR = io.TeeReader(stdoutR, &r.stdout) + stderrR = io.TeeReader(stderrR, &r.stderr) + + // Consume stdout/stderr line-by-line. + var wg sync.WaitGroup + r.StdoutLines = consumeLines(ctx, &wg, stdoutR) + r.StderrLines = consumeLines(ctx, &wg, stderrR) + + // Run command in background. + go func() { + err := root.Execute(ctx, cli) + if err != nil { + r.Logf("Error running command: %s", err) + } + + // Close pipes to signal EOF. + stdoutW.Close() + stderrW.Close() + + // Wait for the [consumeLines] routines to finish now that + // the pipes they're reading from have closed. + wg.Wait() + + if r.stdout.Len() > 0 { + // Make a copy of the buffer such that it remains "unread". + scanner := bufio.NewScanner(bytes.NewBuffer(r.stdout.Bytes())) + for scanner.Scan() { + r.Logf("[databricks stdout]: %s", scanner.Text()) + } + } + + if r.stderr.Len() > 0 { + // Make a copy of the buffer such that it remains "unread". + scanner := bufio.NewScanner(bytes.NewBuffer(r.stderr.Bytes())) + for scanner.Scan() { + r.Logf("[databricks stderr]: %s", scanner.Text()) + } + } + + // Reset context on command for the next test. + // These commands are globals so we have to clean up to the best of our ability after each run. + // See https://github.com/spf13/cobra/blob/a6f198b635c4b18fff81930c40d464904e55b161/command.go#L1062-L1066 + //nolint:staticcheck // cobra sets the context and doesn't clear it + cli.SetContext(nil) + + // Make caller aware of error. + errch <- err + close(errch) + }() + + // Ensure command terminates upon test completion (success or failure). 
+ r.Cleanup(func() { + // Signal termination of command. + cancel() + // Wait for goroutine to finish. + <-errch + }) + + r.errch = errch +} + +func (r *Runner) Run() (bytes.Buffer, bytes.Buffer, error) { + r.RunBackground() + err := <-r.errch + return r.stdout, r.stderr, err +} + +// Like [require.Eventually] but errors if the underlying command has failed. +func (r *Runner) Eventually(condition func() bool, waitFor, tick time.Duration, msgAndArgs ...any) { + ch := make(chan bool, 1) + + timer := time.NewTimer(waitFor) + defer timer.Stop() + + ticker := time.NewTicker(tick) + defer ticker.Stop() + + // Kick off condition check immediately. + go func() { ch <- condition() }() + + for tick := ticker.C; ; { + select { + case err := <-r.errch: + require.Fail(r, "Command failed", err) + return + case <-timer.C: + require.Fail(r, "Condition never satisfied", msgAndArgs...) + return + case <-tick: + tick = nil + go func() { ch <- condition() }() + case v := <-ch: + if v { + return + } + tick = ticker.C + } + } +} + +func (r *Runner) RunAndExpectOutput(heredoc string) { + stdout, _, err := r.Run() + require.NoError(r, err) + require.Equal(r, cmdio.Heredoc(heredoc), strings.TrimSpace(stdout.String())) +} + +func (r *Runner) RunAndParseJSON(v any) { + stdout, _, err := r.Run() + require.NoError(r, err) + err = json.Unmarshal(stdout.Bytes(), &v) + require.NoError(r, err) +} + +func NewRunner(t testutil.TestingT, ctx context.Context, args ...string) *Runner { + return &Runner{ + TestingT: t, + + ctx: ctx, + args: args, + } +} + +func RequireSuccessfulRun(t testutil.TestingT, ctx context.Context, args ...string) (bytes.Buffer, bytes.Buffer) { + t.Logf("run args: [%s]", strings.Join(args, ", ")) + r := NewRunner(t, ctx, args...) + stdout, stderr, err := r.Run() + require.NoError(t, err) + return stdout, stderr +} + +func RequireErrorRun(t testutil.TestingT, ctx context.Context, args ...string) (bytes.Buffer, bytes.Buffer, error) { + r := NewRunner(t, ctx, args...) + stdout, stderr, err := r.Run() + require.Error(t, err) + return stdout, stderr, err +} diff --git a/internal/testutil/cloud.go b/internal/testutil/cloud.go index ba5b75ecf..33921db0c 100644 --- a/internal/testutil/cloud.go +++ b/internal/testutil/cloud.go @@ -1,9 +1,5 @@ package testutil -import ( - "testing" -) - type Cloud int const ( @@ -13,7 +9,7 @@ const ( ) // Implement [Requirement]. -func (c Cloud) Verify(t *testing.T) { +func (c Cloud) Verify(t TestingT) { if c != GetCloud(t) { t.Skipf("Skipping %s-specific test", c) } @@ -32,7 +28,20 @@ func (c Cloud) String() string { } } -func GetCloud(t *testing.T) Cloud { +func (c Cloud) NodeTypeID() string { + switch c { + case AWS: + return "i3.xlarge" + case Azure: + return "Standard_DS4_v2" + case GCP: + return "n1-standard-4" + default: + return "unknown" + } +} + +func GetCloud(t TestingT) Cloud { env := GetEnvOrSkipTest(t, "CLOUD_ENV") switch env { case "aws": @@ -49,7 +58,3 @@ func GetCloud(t *testing.T) Cloud { } return -1 } - -func IsAWSCloud(t *testing.T) bool { - return GetCloud(t) == AWS -} diff --git a/internal/testutil/copy.go b/internal/testutil/copy.go index 21faece00..a521da3e3 100644 --- a/internal/testutil/copy.go +++ b/internal/testutil/copy.go @@ -5,14 +5,13 @@ import ( "io/fs" "os" "path/filepath" - "testing" "github.com/stretchr/testify/require" ) // CopyDirectory copies the contents of a directory to another directory. // The destination directory is created if it does not exist. 
-func CopyDirectory(t *testing.T, src, dst string) { +func CopyDirectory(t TestingT, src, dst string) { err := filepath.WalkDir(src, func(path string, d fs.DirEntry, err error) error { if err != nil { return err @@ -22,7 +21,7 @@ func CopyDirectory(t *testing.T, src, dst string) { require.NoError(t, err) if d.IsDir() { - return os.MkdirAll(filepath.Join(dst, rel), 0755) + return os.MkdirAll(filepath.Join(dst, rel), 0o755) } // Copy the file to the temporary directory diff --git a/internal/testutil/env.go b/internal/testutil/env.go index e1973ba82..598229655 100644 --- a/internal/testutil/env.go +++ b/internal/testutil/env.go @@ -5,7 +5,6 @@ import ( "path/filepath" "runtime" "strings" - "testing" "github.com/stretchr/testify/require" ) @@ -13,7 +12,7 @@ import ( // CleanupEnvironment sets up a pristine environment containing only $PATH and $HOME. // The original environment is restored upon test completion. // Note: use of this function is incompatible with parallel execution. -func CleanupEnvironment(t *testing.T) { +func CleanupEnvironment(t TestingT) { // Restore environment when test finishes. environ := os.Environ() t.Cleanup(func() { @@ -39,20 +38,18 @@ func CleanupEnvironment(t *testing.T) { } } -// GetEnvOrSkipTest proceeds with test only with that env variable -func GetEnvOrSkipTest(t *testing.T, name string) string { - value := os.Getenv(name) - if value == "" { - t.Skipf("Environment variable %s is missing", name) - } - return value -} - // Changes into specified directory for the duration of the test. // Returns the current working directory. -func Chdir(t *testing.T, dir string) string { +func Chdir(t TestingT, dir string) string { + // Prevent parallel execution when changing the working directory. + // t.Setenv automatically fails if t.Parallel is set. + t.Setenv("DO_NOT_RUN_IN_PARALLEL", "true") + wd, err := os.Getwd() require.NoError(t, err) + if os.Getenv("TESTS_ORIG_WD") == "" { + t.Setenv("TESTS_ORIG_WD", wd) + } abs, err := filepath.Abs(dir) require.NoError(t, err) @@ -67,3 +64,10 @@ func Chdir(t *testing.T, dir string) string { return wd } + +// Return filename ff testutil.Chdir was not called. +// Return absolute path to filename testutil.Chdir() was called. +func TestData(filename string) string { + // Note, if TESTS_ORIG_WD is not set, Getenv return "" and Join returns filename + return filepath.Join(os.Getenv("TESTS_ORIG_WD"), filename) +} diff --git a/internal/testutil/file.go b/internal/testutil/file.go index ba2c3280e..538a3c20a 100644 --- a/internal/testutil/file.go +++ b/internal/testutil/file.go @@ -3,24 +3,23 @@ package testutil import ( "os" "path/filepath" - "testing" "github.com/stretchr/testify/require" ) -func TouchNotebook(t *testing.T, elems ...string) string { +func TouchNotebook(t TestingT, elems ...string) string { path := filepath.Join(elems...) - err := os.MkdirAll(filepath.Dir(path), 0755) + err := os.MkdirAll(filepath.Dir(path), 0o755) require.NoError(t, err) - err = os.WriteFile(path, []byte("# Databricks notebook source"), 0644) + err = os.WriteFile(path, []byte("# Databricks notebook source"), 0o644) require.NoError(t, err) return path } -func Touch(t *testing.T, elems ...string) string { +func Touch(t TestingT, elems ...string) string { path := filepath.Join(elems...) 
- err := os.MkdirAll(filepath.Dir(path), 0755) + err := os.MkdirAll(filepath.Dir(path), 0o755) require.NoError(t, err) f, err := os.Create(path) @@ -31,9 +30,9 @@ func Touch(t *testing.T, elems ...string) string { return path } -func WriteFile(t *testing.T, content string, elems ...string) string { - path := filepath.Join(elems...) - err := os.MkdirAll(filepath.Dir(path), 0755) +// WriteFile writes content to a file. +func WriteFile(t TestingT, path, content string) { + err := os.MkdirAll(filepath.Dir(path), 0o755) require.NoError(t, err) f, err := os.Create(path) @@ -44,5 +43,12 @@ func WriteFile(t *testing.T, content string, elems ...string) string { err = f.Close() require.NoError(t, err) - return path +} + +// ReadFile reads a file and returns its content as a string. +func ReadFile(t TestingT, path string) string { + b, err := os.ReadFile(path) + require.NoError(t, err) + + return string(b) } diff --git a/internal/acc/helpers.go b/internal/testutil/helpers.go similarity index 83% rename from internal/acc/helpers.go rename to internal/testutil/helpers.go index f98001346..019a8e618 100644 --- a/internal/acc/helpers.go +++ b/internal/testutil/helpers.go @@ -1,15 +1,14 @@ -package acc +package testutil import ( "fmt" "math/rand" "os" "strings" - "testing" ) // GetEnvOrSkipTest proceeds with test only with that env variable. -func GetEnvOrSkipTest(t *testing.T, name string) string { +func GetEnvOrSkipTest(t TestingT, name string) string { value := os.Getenv(name) if value == "" { t.Skipf("Environment variable %s is missing", name) @@ -24,7 +23,7 @@ func RandomName(prefix ...string) string { randLen := 12 b := make([]byte, randLen) for i := range b { - b[i] = charset[rand.Intn(randLen)] + b[i] = charset[rand.Intn(len(charset))] } if len(prefix) > 0 { return fmt.Sprintf("%s%s", strings.Join(prefix, ""), b) diff --git a/internal/testutil/interface.go b/internal/testutil/interface.go new file mode 100644 index 000000000..2c3004800 --- /dev/null +++ b/internal/testutil/interface.go @@ -0,0 +1,27 @@ +package testutil + +// TestingT is an interface wrapper around *testing.T that provides the methods +// that are used by the test package to convey information about test failures. +// +// We use an interface so we can wrap *testing.T and provide additional functionality. +type TestingT interface { + Log(args ...any) + Logf(format string, args ...any) + + Error(args ...any) + Errorf(format string, args ...any) + + Fatal(args ...any) + Fatalf(format string, args ...any) + + Skip(args ...any) + Skipf(format string, args ...any) + + FailNow() + + Cleanup(func()) + + Setenv(key, value string) + + TempDir() string +} diff --git a/internal/testutil/jdk.go b/internal/testutil/jdk.go index 05bd7d6d6..60fa439db 100644 --- a/internal/testutil/jdk.go +++ b/internal/testutil/jdk.go @@ -5,12 +5,11 @@ import ( "context" "os/exec" "strings" - "testing" "github.com/stretchr/testify/require" ) -func RequireJDK(t *testing.T, ctx context.Context, version string) { +func RequireJDK(t TestingT, ctx context.Context, version string) { var stderr bytes.Buffer cmd := exec.Command("javac", "-version") diff --git a/internal/testutil/requirement.go b/internal/testutil/requirement.go index 53855e0b5..e182b7518 100644 --- a/internal/testutil/requirement.go +++ b/internal/testutil/requirement.go @@ -1,18 +1,14 @@ package testutil -import ( - "testing" -) - // Requirement is the interface for test requirements. 
type Requirement interface { - Verify(t *testing.T) + Verify(t TestingT) } // Require should be called at the beginning of a test to ensure that all // requirements are met before running the test. // If any requirement is not met, the test will be skipped. -func Require(t *testing.T, requirements ...Requirement) { +func Require(t TestingT, requirements ...Requirement) { for _, r := range requirements { r.Verify(t) } diff --git a/internal/testutil/testutil_test.go b/internal/testutil/testutil_test.go new file mode 100644 index 000000000..d41374d55 --- /dev/null +++ b/internal/testutil/testutil_test.go @@ -0,0 +1,36 @@ +package testutil_test + +import ( + "go/parser" + "go/token" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestNoTestingImport checks that no file in the package imports the testing package. +// All exported functions must use the TestingT interface instead of *testing.T. +func TestNoTestingImport(t *testing.T) { + // Parse the package + fset := token.NewFileSet() + pkgs, err := parser.ParseDir(fset, ".", nil, parser.AllErrors) + require.NoError(t, err) + + // Iterate through the files in the package + for _, pkg := range pkgs { + for _, file := range pkg.Files { + // Skip test files + if strings.HasSuffix(fset.Position(file.Pos()).Filename, "_test.go") { + continue + } + // Check the imports of each file + for _, imp := range file.Imports { + if imp.Path.Value == `"testing"` { + assert.Fail(t, "File imports the testing package", "File %s imports the testing package", fset.Position(file.Pos()).Filename) + } + } + } + } +} diff --git a/libs/auth/callback.go b/libs/auth/callback.go index 5a2400697..3893a5041 100644 --- a/libs/auth/callback.go +++ b/libs/auth/callback.go @@ -53,7 +53,9 @@ func newCallback(ctx context.Context, a *PersistentAuth) (*callbackServer, error a: a, } cb.srv.Handler = cb - go cb.srv.Serve(cb.ln) + go func() { + _ = cb.srv.Serve(cb.ln) + }() return cb, nil } diff --git a/libs/cmdgroup/command_test.go b/libs/cmdgroup/command_test.go index f3e3fe6ab..2c248f09f 100644 --- a/libs/cmdgroup/command_test.go +++ b/libs/cmdgroup/command_test.go @@ -42,7 +42,8 @@ func TestCommandFlagGrouping(t *testing.T) { buf := bytes.NewBuffer(nil) cmd.SetOutput(buf) - cmd.Usage() + err := cmd.Usage() + require.NoError(t, err) expected := `Usage: parent test [flags] diff --git a/libs/cmdio/io.go b/libs/cmdio/io.go index 75c0c4b87..c0e9e868a 100644 --- a/libs/cmdio/io.go +++ b/libs/cmdio/io.go @@ -31,9 +31,9 @@ type cmdIO struct { err io.Writer } -func NewIO(outputFormat flags.Output, in io.Reader, out io.Writer, err io.Writer, headerTemplate, template string) *cmdIO { +func NewIO(ctx context.Context, outputFormat flags.Output, in io.Reader, out, err io.Writer, headerTemplate, template string) *cmdIO { // The check below is similar to color.NoColor but uses the specified err writer. 
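// Sketch (not part of the patch) of the trade-off in the callback.go change above:
// discarding the return value of Serve satisfies errcheck because Serve always
// returns a non-nil error, and after Shutdown or Close that error is
// http.ErrServerClosed, which is the expected outcome here. A more explicit
// variant, assuming "errors", "log", and "net/http" are imported, would be:
go func() {
	if err := cb.srv.Serve(cb.ln); err != nil && !errors.Is(err, http.ErrServerClosed) {
		// Only unexpected failures are worth reporting.
		log.Printf("callback server: %v", err)
	}
}()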
- dumb := os.Getenv("NO_COLOR") != "" || os.Getenv("TERM") == "dumb" + dumb := env.Get(ctx, "NO_COLOR") != "" || env.Get(ctx, "TERM") == "dumb" if f, ok := err.(*os.File); ok && !dumb { dumb = !isatty.IsTerminal(f.Fd()) && !isatty.IsCygwinTerminal(f.Fd()) } diff --git a/libs/cmdio/logger.go b/libs/cmdio/logger.go index 45b1883ce..7bc95e9a5 100644 --- a/libs/cmdio/logger.go +++ b/libs/cmdio/logger.go @@ -151,7 +151,7 @@ func (l *Logger) AskSelect(question string, choices []string) (string, error) { return ans, nil } -func (l *Logger) Ask(question string, defaultVal string) (string, error) { +func (l *Logger) Ask(question, defaultVal string) (string, error) { if l.Mode == flags.ModeJson { return "", fmt.Errorf("question prompts are not supported in json mode") } @@ -188,29 +188,29 @@ func (l *Logger) writeJson(event Event) { // we panic because there we cannot catch this in jobs.RunNowAndWait panic(err) } - l.Writer.Write([]byte(b)) - l.Writer.Write([]byte("\n")) + _, _ = l.Writer.Write([]byte(b)) + _, _ = l.Writer.Write([]byte("\n")) } func (l *Logger) writeAppend(event Event) { - l.Writer.Write([]byte(event.String())) - l.Writer.Write([]byte("\n")) + _, _ = l.Writer.Write([]byte(event.String())) + _, _ = l.Writer.Write([]byte("\n")) } func (l *Logger) writeInplace(event Event) { if l.isFirstEvent { // save cursor location - l.Writer.Write([]byte("\033[s")) + _, _ = l.Writer.Write([]byte("\033[s")) } // move cursor to saved location - l.Writer.Write([]byte("\033[u")) + _, _ = l.Writer.Write([]byte("\033[u")) // clear from cursor to end of screen - l.Writer.Write([]byte("\033[0J")) + _, _ = l.Writer.Write([]byte("\033[0J")) - l.Writer.Write([]byte(event.String())) - l.Writer.Write([]byte("\n")) + _, _ = l.Writer.Write([]byte(event.String())) + _, _ = l.Writer.Write([]byte("\n")) l.isFirstEvent = false } @@ -234,5 +234,4 @@ func (l *Logger) Log(event Event) { // jobs.RunNowAndWait panic("unknown progress logger mode: " + l.Mode.String()) } - } diff --git a/libs/cmdio/render.go b/libs/cmdio/render.go index c68ddca0d..1529274a3 100644 --- a/libs/cmdio/render.go +++ b/libs/cmdio/render.go @@ -361,7 +361,9 @@ func renderUsingTemplate(ctx context.Context, r templateRenderer, w io.Writer, h if err != nil { return err } - tw.Write([]byte("\n")) + if _, err := tw.Write([]byte("\n")); err != nil { + return err + } // Do not flush here. Instead, allow the first 100 resources to determine the initial spacing of the header columns. 
} t, err := base.Parse(tmpl) diff --git a/libs/cmdio/render_test.go b/libs/cmdio/render_test.go index 6bde446c4..f26190a23 100644 --- a/libs/cmdio/render_test.go +++ b/libs/cmdio/render_test.go @@ -171,8 +171,9 @@ func TestRender(t *testing.T) { for _, c := range testCases { t.Run(c.name, func(t *testing.T) { output := &bytes.Buffer{} - cmdIO := NewIO(c.outputFormat, nil, output, output, c.headerTemplate, c.template) - ctx := InContext(context.Background(), cmdIO) + ctx := context.Background() + cmdIO := NewIO(ctx, c.outputFormat, nil, output, output, c.headerTemplate, c.template) + ctx = InContext(ctx, cmdIO) var err error if vv, ok := c.v.(listing.Iterator[*provisioning.Workspace]); ok { err = RenderIterator(ctx, vv) diff --git a/libs/databrickscfg/cfgpickers/clusters.go b/libs/databrickscfg/cfgpickers/clusters.go index cac1b08a7..6ae7d99c6 100644 --- a/libs/databrickscfg/cfgpickers/clusters.go +++ b/libs/databrickscfg/cfgpickers/clusters.go @@ -18,8 +18,10 @@ import ( var minUcRuntime = canonicalVersion("v12.0") -var dbrVersionRegex = regexp.MustCompile(`^(\d+\.\d+)\.x-.*`) -var dbrSnapshotVersionRegex = regexp.MustCompile(`^(\d+)\.x-snapshot.*`) +var ( + dbrVersionRegex = regexp.MustCompile(`^(\d+\.\d+)\.x-.*`) + dbrSnapshotVersionRegex = regexp.MustCompile(`^(\d+)\.x-snapshot.*`) +) func canonicalVersion(v string) string { return semver.Canonical("v" + strings.TrimPrefix(v, "v")) diff --git a/libs/databrickscfg/cfgpickers/clusters_test.go b/libs/databrickscfg/cfgpickers/clusters_test.go index d17e86d4a..cde09aa44 100644 --- a/libs/databrickscfg/cfgpickers/clusters_test.go +++ b/libs/databrickscfg/cfgpickers/clusters_test.go @@ -115,7 +115,7 @@ func TestFirstCompatibleCluster(t *testing.T) { w := databricks.Must(databricks.NewWorkspaceClient((*databricks.Config)(cfg))) ctx := context.Background() - ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) + ctx = cmdio.InContext(ctx, cmdio.NewIO(ctx, flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) clusterID, err := AskForCluster(ctx, w, WithDatabricksConnect("13.1")) require.NoError(t, err) require.Equal(t, "bcd-id", clusterID) @@ -162,7 +162,7 @@ func TestNoCompatibleClusters(t *testing.T) { w := databricks.Must(databricks.NewWorkspaceClient((*databricks.Config)(cfg))) ctx := context.Background() - ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) + ctx = cmdio.InContext(ctx, cmdio.NewIO(ctx, flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) _, err := AskForCluster(ctx, w, WithDatabricksConnect("13.1")) require.Equal(t, ErrNoCompatibleClusters, err) } diff --git a/libs/dyn/convert/from_typed.go b/libs/dyn/convert/from_typed.go index cd92ad0eb..ed1b85a36 100644 --- a/libs/dyn/convert/from_typed.go +++ b/libs/dyn/convert/from_typed.go @@ -126,7 +126,7 @@ func fromTypedStruct(src reflect.Value, ref dyn.Value, options ...fromTypedOptio // Either if the key was set in the reference or the field is not zero-valued, we include it. if ok || nv.Kind() != dyn.KindNil { - out.Set(refk, nv) + out.Set(refk, nv) // nolint:errcheck } } @@ -184,7 +184,7 @@ func fromTypedMap(src reflect.Value, ref dyn.Value) (dyn.Value, error) { // Every entry is represented, even if it is a nil. // Otherwise, a map with zero-valued structs would yield a nil as well. 
- out.Set(refk, nv) + out.Set(refk, nv) //nolint:errcheck } return dyn.V(out), nil diff --git a/libs/dyn/convert/from_typed_test.go b/libs/dyn/convert/from_typed_test.go index 0cddff3be..8a05bfb38 100644 --- a/libs/dyn/convert/from_typed_test.go +++ b/libs/dyn/convert/from_typed_test.go @@ -325,7 +325,7 @@ func TestFromTypedMapNil(t *testing.T) { } func TestFromTypedMapEmpty(t *testing.T) { - var src = map[string]string{} + src := map[string]string{} ref := dyn.V(map[string]dyn.Value{ "foo": dyn.V("bar"), @@ -338,7 +338,7 @@ func TestFromTypedMapEmpty(t *testing.T) { } func TestFromTypedMapNonEmpty(t *testing.T) { - var src = map[string]string{ + src := map[string]string{ "foo": "foo", "bar": "bar", } @@ -353,7 +353,7 @@ func TestFromTypedMapNonEmpty(t *testing.T) { } func TestFromTypedMapNonEmptyRetainLocation(t *testing.T) { - var src = map[string]string{ + src := map[string]string{ "foo": "bar", "bar": "qux", } @@ -372,7 +372,7 @@ func TestFromTypedMapNonEmptyRetainLocation(t *testing.T) { } func TestFromTypedMapFieldWithZeroValue(t *testing.T) { - var src = map[string]string{ + src := map[string]string{ "foo": "", } @@ -398,7 +398,7 @@ func TestFromTypedSliceNil(t *testing.T) { } func TestFromTypedSliceEmpty(t *testing.T) { - var src = []string{} + src := []string{} ref := dyn.V([]dyn.Value{ dyn.V("bar"), @@ -411,7 +411,7 @@ func TestFromTypedSliceEmpty(t *testing.T) { } func TestFromTypedSliceNonEmpty(t *testing.T) { - var src = []string{ + src := []string{ "foo", "bar", } @@ -426,7 +426,7 @@ func TestFromTypedSliceNonEmpty(t *testing.T) { } func TestFromTypedSliceNonEmptyRetainLocation(t *testing.T) { - var src = []string{ + src := []string{ "foo", "bar", } @@ -446,7 +446,7 @@ func TestFromTypedSliceNonEmptyRetainLocation(t *testing.T) { func TestFromTypedStringEmpty(t *testing.T) { var src string - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NilValue, nv) @@ -454,7 +454,7 @@ func TestFromTypedStringEmpty(t *testing.T) { func TestFromTypedStringEmptyOverwrite(t *testing.T) { var src string - var ref = dyn.V("old") + ref := dyn.V("old") nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(""), nv) @@ -462,7 +462,7 @@ func TestFromTypedStringEmptyOverwrite(t *testing.T) { func TestFromTypedStringNonEmpty(t *testing.T) { var src string = "new" - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V("new"), nv) @@ -470,14 +470,14 @@ func TestFromTypedStringNonEmpty(t *testing.T) { func TestFromTypedStringNonEmptyOverwrite(t *testing.T) { var src string = "new" - var ref = dyn.V("old") + ref := dyn.V("old") nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V("new"), nv) } func TestFromTypedStringRetainsLocations(t *testing.T) { - var ref = dyn.NewValue("foo", []dyn.Location{{File: "foo"}}) + ref := dyn.NewValue("foo", []dyn.Location{{File: "foo"}}) // case: value has not been changed var src string = "foo" @@ -494,14 +494,14 @@ func TestFromTypedStringRetainsLocations(t *testing.T) { func TestFromTypedStringTypeError(t *testing.T) { var src string = "foo" - var ref = dyn.V(1234) + ref := dyn.V(1234) _, err := FromTyped(src, ref) require.Error(t, err) } func TestFromTypedBoolEmpty(t *testing.T) { var src bool - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NilValue, nv) @@ -509,7 +509,7 @@ func TestFromTypedBoolEmpty(t 
*testing.T) { func TestFromTypedBoolEmptyOverwrite(t *testing.T) { var src bool - var ref = dyn.V(true) + ref := dyn.V(true) nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(false), nv) @@ -517,7 +517,7 @@ func TestFromTypedBoolEmptyOverwrite(t *testing.T) { func TestFromTypedBoolNonEmpty(t *testing.T) { var src bool = true - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(true), nv) @@ -525,14 +525,14 @@ func TestFromTypedBoolNonEmpty(t *testing.T) { func TestFromTypedBoolNonEmptyOverwrite(t *testing.T) { var src bool = true - var ref = dyn.V(false) + ref := dyn.V(false) nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(true), nv) } func TestFromTypedBoolRetainsLocations(t *testing.T) { - var ref = dyn.NewValue(true, []dyn.Location{{File: "foo"}}) + ref := dyn.NewValue(true, []dyn.Location{{File: "foo"}}) // case: value has not been changed var src bool = true @@ -549,7 +549,7 @@ func TestFromTypedBoolRetainsLocations(t *testing.T) { func TestFromTypedBoolVariableReference(t *testing.T) { var src bool = true - var ref = dyn.V("${var.foo}") + ref := dyn.V("${var.foo}") nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V("${var.foo}"), nv) @@ -557,14 +557,14 @@ func TestFromTypedBoolVariableReference(t *testing.T) { func TestFromTypedBoolTypeError(t *testing.T) { var src bool = true - var ref = dyn.V("string") + ref := dyn.V("string") _, err := FromTyped(src, ref) require.Error(t, err) } func TestFromTypedIntEmpty(t *testing.T) { var src int - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NilValue, nv) @@ -572,7 +572,7 @@ func TestFromTypedIntEmpty(t *testing.T) { func TestFromTypedIntEmptyOverwrite(t *testing.T) { var src int - var ref = dyn.V(1234) + ref := dyn.V(1234) nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(int64(0)), nv) @@ -580,7 +580,7 @@ func TestFromTypedIntEmptyOverwrite(t *testing.T) { func TestFromTypedIntNonEmpty(t *testing.T) { var src int = 1234 - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(int64(1234)), nv) @@ -588,14 +588,14 @@ func TestFromTypedIntNonEmpty(t *testing.T) { func TestFromTypedIntNonEmptyOverwrite(t *testing.T) { var src int = 1234 - var ref = dyn.V(1233) + ref := dyn.V(1233) nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(int64(1234)), nv) } func TestFromTypedIntRetainsLocations(t *testing.T) { - var ref = dyn.NewValue(1234, []dyn.Location{{File: "foo"}}) + ref := dyn.NewValue(1234, []dyn.Location{{File: "foo"}}) // case: value has not been changed var src int = 1234 @@ -612,7 +612,7 @@ func TestFromTypedIntRetainsLocations(t *testing.T) { func TestFromTypedIntVariableReference(t *testing.T) { var src int = 1234 - var ref = dyn.V("${var.foo}") + ref := dyn.V("${var.foo}") nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V("${var.foo}"), nv) @@ -620,14 +620,14 @@ func TestFromTypedIntVariableReference(t *testing.T) { func TestFromTypedIntTypeError(t *testing.T) { var src int = 1234 - var ref = dyn.V("string") + ref := dyn.V("string") _, err := FromTyped(src, ref) require.Error(t, err) } func TestFromTypedFloatEmpty(t *testing.T) { var src float64 - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, 
dyn.NilValue, nv) @@ -635,7 +635,7 @@ func TestFromTypedFloatEmpty(t *testing.T) { func TestFromTypedFloatEmptyOverwrite(t *testing.T) { var src float64 - var ref = dyn.V(1.23) + ref := dyn.V(1.23) nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(0.0), nv) @@ -643,7 +643,7 @@ func TestFromTypedFloatEmptyOverwrite(t *testing.T) { func TestFromTypedFloatNonEmpty(t *testing.T) { var src float64 = 1.23 - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(1.23), nv) @@ -651,7 +651,7 @@ func TestFromTypedFloatNonEmpty(t *testing.T) { func TestFromTypedFloatNonEmptyOverwrite(t *testing.T) { var src float64 = 1.23 - var ref = dyn.V(1.24) + ref := dyn.V(1.24) nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(1.23), nv) @@ -659,7 +659,7 @@ func TestFromTypedFloatNonEmptyOverwrite(t *testing.T) { func TestFromTypedFloatRetainsLocations(t *testing.T) { var src float64 - var ref = dyn.NewValue(1.23, []dyn.Location{{File: "foo"}}) + ref := dyn.NewValue(1.23, []dyn.Location{{File: "foo"}}) // case: value has not been changed src = 1.23 @@ -676,7 +676,7 @@ func TestFromTypedFloatRetainsLocations(t *testing.T) { func TestFromTypedFloatVariableReference(t *testing.T) { var src float64 = 1.23 - var ref = dyn.V("${var.foo}") + ref := dyn.V("${var.foo}") nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V("${var.foo}"), nv) @@ -684,7 +684,7 @@ func TestFromTypedFloatVariableReference(t *testing.T) { func TestFromTypedFloatTypeError(t *testing.T) { var src float64 = 1.23 - var ref = dyn.V("string") + ref := dyn.V("string") _, err := FromTyped(src, ref) require.Error(t, err) } @@ -727,7 +727,7 @@ func TestFromTypedAny(t *testing.T) { func TestFromTypedAnyNil(t *testing.T) { var src any = nil - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NilValue, nv) diff --git a/libs/dyn/convert/normalize.go b/libs/dyn/convert/normalize.go index 106add35d..31cd8b6e3 100644 --- a/libs/dyn/convert/normalize.go +++ b/libs/dyn/convert/normalize.go @@ -116,7 +116,7 @@ func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value, seen } } - out.Set(pk, nv) + out.Set(pk, nv) //nolint:errcheck } // Return the normalized value if missing fields are not included. 
@@ -162,7 +162,7 @@ func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value, seen continue } if v.IsValid() { - out.Set(dyn.V(k), v) + out.Set(dyn.V(k), v) // nolint:errcheck } } @@ -201,7 +201,7 @@ func (n normalizeOptions) normalizeMap(typ reflect.Type, src dyn.Value, seen []r } } - out.Set(pk, nv) + out.Set(pk, nv) //nolint:errcheck } return dyn.NewValue(out, src.Locations()), diags diff --git a/libs/dyn/convert/normalize_test.go b/libs/dyn/convert/normalize_test.go index ab0a1cec1..449c09075 100644 --- a/libs/dyn/convert/normalize_test.go +++ b/libs/dyn/convert/normalize_test.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" assert "github.com/databricks/cli/libs/dyn/dynassert" + "github.com/stretchr/testify/require" ) func TestNormalizeStruct(t *testing.T) { @@ -20,8 +21,8 @@ func TestNormalizeStruct(t *testing.T) { "bar": dyn.V("baz"), }) - vout, err := Normalize(typ, vin) - assert.Empty(t, err) + vout, diags := Normalize(typ, vin) + assert.Empty(t, diags) assert.Equal(t, vin, vout) } @@ -37,14 +38,14 @@ func TestNormalizeStructElementDiagnostic(t *testing.T) { "bar": dyn.V(map[string]dyn.Value{"an": dyn.V("error")}), }) - vout, err := Normalize(typ, vin) - assert.Len(t, err, 1) + vout, diags := Normalize(typ, vin) + assert.Len(t, diags, 1) assert.Equal(t, diag.Diagnostic{ Severity: diag.Warning, Summary: `expected string, found map`, Locations: []dyn.Location{{}}, Paths: []dyn.Path{dyn.NewPath(dyn.Key("bar"))}, - }, err[0]) + }, diags[0]) // Elements that encounter an error during normalization are dropped. assert.Equal(t, map[string]any{ @@ -60,17 +61,20 @@ func TestNormalizeStructUnknownField(t *testing.T) { var typ Tmp m := dyn.NewMapping() - m.Set(dyn.V("foo"), dyn.V("val-foo")) + err := m.Set(dyn.V("foo"), dyn.V("val-foo")) + require.NoError(t, err) + // Set the unknown field, with location information. - m.Set(dyn.NewValue("bar", []dyn.Location{ + err = m.Set(dyn.NewValue("bar", []dyn.Location{ {File: "hello.yaml", Line: 1, Column: 1}, {File: "world.yaml", Line: 2, Column: 2}, }), dyn.V("var-bar")) + require.NoError(t, err) vin := dyn.V(m) - vout, err := Normalize(typ, vin) - assert.Len(t, err, 1) + vout, diags := Normalize(typ, vin) + assert.Len(t, diags, 1) assert.Equal(t, diag.Diagnostic{ Severity: diag.Warning, Summary: `unknown field: bar`, @@ -80,7 +84,7 @@ func TestNormalizeStructUnknownField(t *testing.T) { {File: "world.yaml", Line: 2, Column: 2}, }, Paths: []dyn.Path{dyn.EmptyPath}, - }, err[0]) + }, diags[0]) // The field that can be mapped to the struct field is retained. assert.Equal(t, map[string]any{ diff --git a/libs/dyn/convert/struct_info.go b/libs/dyn/convert/struct_info.go index dc3ed4da4..f5fd29cb9 100644 --- a/libs/dyn/convert/struct_info.go +++ b/libs/dyn/convert/struct_info.go @@ -43,7 +43,7 @@ func getStructInfo(typ reflect.Type) structInfo { // buildStructInfo populates a new [structInfo] for the given type. 
func buildStructInfo(typ reflect.Type) structInfo { - var out = structInfo{ + out := structInfo{ Fields: make(map[string][]int), } @@ -102,7 +102,7 @@ func buildStructInfo(typ reflect.Type) structInfo { } func (s *structInfo) FieldValues(v reflect.Value) map[string]reflect.Value { - var out = make(map[string]reflect.Value) + out := make(map[string]reflect.Value) for k, index := range s.Fields { fv := v diff --git a/libs/dyn/convert/struct_info_test.go b/libs/dyn/convert/struct_info_test.go index 20348ff60..bc10db9da 100644 --- a/libs/dyn/convert/struct_info_test.go +++ b/libs/dyn/convert/struct_info_test.go @@ -95,7 +95,7 @@ func TestStructInfoFieldValues(t *testing.T) { Bar string `json:"bar"` } - var src = Tmp{ + src := Tmp{ Foo: "foo", Bar: "bar", } @@ -121,7 +121,7 @@ func TestStructInfoFieldValuesAnonymousByValue(t *testing.T) { Foo } - var src = Tmp{ + src := Tmp{ Foo: Foo{ Foo: "foo", Bar: Bar{ diff --git a/libs/dyn/convert/to_typed_test.go b/libs/dyn/convert/to_typed_test.go index 78221c299..4a56dd4fc 100644 --- a/libs/dyn/convert/to_typed_test.go +++ b/libs/dyn/convert/to_typed_test.go @@ -44,7 +44,7 @@ func TestToTypedStructOverwrite(t *testing.T) { Qux string `json:"-"` } - var out = Tmp{ + out := Tmp{ Foo: "baz", Bar: "qux", } @@ -66,7 +66,7 @@ func TestToTypedStructClearFields(t *testing.T) { } // Struct value with non-empty fields. - var out = Tmp{ + out := Tmp{ Foo: "baz", Bar: "qux", } @@ -137,7 +137,7 @@ func TestToTypedStructNil(t *testing.T) { Foo string `json:"foo"` } - var out = Tmp{} + out := Tmp{} err := ToTyped(&out, dyn.NilValue) require.NoError(t, err) assert.Equal(t, Tmp{}, out) @@ -148,7 +148,7 @@ func TestToTypedStructNilOverwrite(t *testing.T) { Foo string `json:"foo"` } - var out = Tmp{"bar"} + out := Tmp{"bar"} err := ToTyped(&out, dyn.NilValue) require.NoError(t, err) assert.Equal(t, Tmp{}, out) @@ -173,7 +173,7 @@ func TestToTypedStructWithValueField(t *testing.T) { } func TestToTypedMap(t *testing.T) { - var out = map[string]string{} + out := map[string]string{} v := dyn.V(map[string]dyn.Value{ "key": dyn.V("value"), @@ -186,7 +186,7 @@ func TestToTypedMap(t *testing.T) { } func TestToTypedMapOverwrite(t *testing.T) { - var out = map[string]string{ + out := map[string]string{ "foo": "bar", } @@ -214,14 +214,14 @@ func TestToTypedMapWithPointerElement(t *testing.T) { } func TestToTypedMapNil(t *testing.T) { - var out = map[string]string{} + out := map[string]string{} err := ToTyped(&out, dyn.NilValue) require.NoError(t, err) assert.Nil(t, out) } func TestToTypedMapNilOverwrite(t *testing.T) { - var out = map[string]string{ + out := map[string]string{ "foo": "bar", } err := ToTyped(&out, dyn.NilValue) @@ -245,7 +245,7 @@ func TestToTypedSlice(t *testing.T) { } func TestToTypedSliceOverwrite(t *testing.T) { - var out = []string{"qux"} + out := []string{"qux"} v := dyn.V([]dyn.Value{ dyn.V("foo"), @@ -282,7 +282,7 @@ func TestToTypedSliceNil(t *testing.T) { } func TestToTypedSliceNilOverwrite(t *testing.T) { - var out = []string{"foo"} + out := []string{"foo"} err := ToTyped(&out, dyn.NilValue) require.NoError(t, err) assert.Nil(t, out) diff --git a/libs/dyn/dynassert/assert.go b/libs/dyn/dynassert/assert.go index ebdba1214..616a588ec 100644 --- a/libs/dyn/dynassert/assert.go +++ b/libs/dyn/dynassert/assert.go @@ -5,7 +5,7 @@ import ( "github.com/stretchr/testify/assert" ) -func Equal(t assert.TestingT, expected any, actual any, msgAndArgs ...any) bool { +func Equal(t assert.TestingT, expected, actual any, msgAndArgs ...any) bool { ev, eok := 
expected.(dyn.Value) av, aok := actual.(dyn.Value) if eok && aok && ev.IsValid() && av.IsValid() { @@ -36,7 +36,7 @@ func EqualValues(t assert.TestingT, expected, actual any, msgAndArgs ...any) boo return assert.EqualValues(t, expected, actual, msgAndArgs...) } -func NotEqual(t assert.TestingT, expected any, actual any, msgAndArgs ...any) bool { +func NotEqual(t assert.TestingT, expected, actual any, msgAndArgs ...any) bool { return assert.NotEqual(t, expected, actual, msgAndArgs...) } @@ -84,11 +84,11 @@ func False(t assert.TestingT, value bool, msgAndArgs ...any) bool { return assert.False(t, value, msgAndArgs...) } -func Contains(t assert.TestingT, list any, element any, msgAndArgs ...any) bool { +func Contains(t assert.TestingT, list, element any, msgAndArgs ...any) bool { return assert.Contains(t, list, element, msgAndArgs...) } -func NotContains(t assert.TestingT, list any, element any, msgAndArgs ...any) bool { +func NotContains(t assert.TestingT, list, element any, msgAndArgs ...any) bool { return assert.NotContains(t, list, element, msgAndArgs...) } @@ -112,6 +112,6 @@ func NotPanics(t assert.TestingT, f func(), msgAndArgs ...any) bool { return assert.NotPanics(t, f, msgAndArgs...) } -func JSONEq(t assert.TestingT, expected string, actual string, msgAndArgs ...any) bool { +func JSONEq(t assert.TestingT, expected, actual string, msgAndArgs ...any) bool { return assert.JSONEq(t, expected, actual, msgAndArgs...) } diff --git a/libs/dyn/dynassert/assert_test.go b/libs/dyn/dynassert/assert_test.go index 43258bd20..c8c2d6960 100644 --- a/libs/dyn/dynassert/assert_test.go +++ b/libs/dyn/dynassert/assert_test.go @@ -13,7 +13,7 @@ import ( ) func TestThatThisTestPackageIsUsed(t *testing.T) { - var base = ".." + base := ".." var files []string err := fs.WalkDir(os.DirFS(base), ".", func(path string, d fs.DirEntry, err error) error { if d.IsDir() { diff --git a/libs/dyn/jsonloader/json.go b/libs/dyn/jsonloader/json.go index cbf539263..3f2dc859f 100644 --- a/libs/dyn/jsonloader/json.go +++ b/libs/dyn/jsonloader/json.go @@ -70,7 +70,7 @@ func decodeValue(decoder *json.Decoder, o *Offset) (dyn.Value, error) { return invalidValueWithLocation(decoder, o), err } - obj.Set(keyVal, val) + obj.Set(keyVal, val) //nolint:errcheck } // Consume the closing '}' if _, err := decoder.Token(); err != nil { diff --git a/libs/dyn/jsonsaver/marshal_test.go b/libs/dyn/jsonsaver/marshal_test.go index 0b6a34283..e8897ea49 100644 --- a/libs/dyn/jsonsaver/marshal_test.go +++ b/libs/dyn/jsonsaver/marshal_test.go @@ -5,6 +5,7 @@ import ( "github.com/databricks/cli/libs/dyn" assert "github.com/databricks/cli/libs/dyn/dynassert" + "github.com/stretchr/testify/require" ) func TestMarshal_String(t *testing.T) { @@ -44,8 +45,8 @@ func TestMarshal_Time(t *testing.T) { func TestMarshal_Map(t *testing.T) { m := dyn.NewMapping() - m.Set(dyn.V("key1"), dyn.V("value1")) - m.Set(dyn.V("key2"), dyn.V("value2")) + require.NoError(t, m.Set(dyn.V("key1"), dyn.V("value1"))) + require.NoError(t, m.Set(dyn.V("key2"), dyn.V("value2"))) b, err := Marshal(dyn.V(m)) if assert.NoError(t, err) { @@ -66,16 +67,16 @@ func TestMarshal_Sequence(t *testing.T) { func TestMarshal_Complex(t *testing.T) { map1 := dyn.NewMapping() - map1.Set(dyn.V("str1"), dyn.V("value1")) - map1.Set(dyn.V("str2"), dyn.V("value2")) + require.NoError(t, map1.Set(dyn.V("str1"), dyn.V("value1"))) + require.NoError(t, map1.Set(dyn.V("str2"), dyn.V("value2"))) seq1 := []dyn.Value{} seq1 = append(seq1, dyn.V("value1")) seq1 = append(seq1, dyn.V("value2")) root := 
dyn.NewMapping() - root.Set(dyn.V("map1"), dyn.V(map1)) - root.Set(dyn.V("seq1"), dyn.V(seq1)) + require.NoError(t, root.Set(dyn.V("map1"), dyn.V(map1))) + require.NoError(t, root.Set(dyn.V("seq1"), dyn.V(seq1))) // Marshal without indent. b, err := Marshal(dyn.V(root)) diff --git a/libs/dyn/mapping.go b/libs/dyn/mapping.go index f9f2d2e97..3c7c4e96e 100644 --- a/libs/dyn/mapping.go +++ b/libs/dyn/mapping.go @@ -41,7 +41,7 @@ func newMappingWithSize(size int) Mapping { func newMappingFromGoMap(vin map[string]Value) Mapping { m := newMappingWithSize(len(vin)) for k, v := range vin { - m.Set(V(k), v) + m.Set(V(k), v) //nolint:errcheck } return m } @@ -94,7 +94,7 @@ func (m *Mapping) GetByString(skey string) (Value, bool) { // If the key already exists, the value is updated. // If the key does not exist, a new key-value pair is added. // The key must be a string, otherwise an error is returned. -func (m *Mapping) Set(key Value, value Value) error { +func (m *Mapping) Set(key, value Value) error { skey, ok := key.AsString() if !ok { return fmt.Errorf("key must be a string, got %s", key.Kind()) @@ -144,6 +144,6 @@ func (m Mapping) Clone() Mapping { // Merge merges the key-value pairs from another Mapping into the current Mapping. func (m *Mapping) Merge(n Mapping) { for _, p := range n.pairs { - m.Set(p.Key, p.Value) + m.Set(p.Key, p.Value) //nolint:errcheck } } diff --git a/libs/dyn/merge/merge.go b/libs/dyn/merge/merge.go index 29decd779..72d9a7d28 100644 --- a/libs/dyn/merge/merge.go +++ b/libs/dyn/merge/merge.go @@ -88,10 +88,10 @@ func mergeMap(a, b dyn.Value) (dyn.Value, error) { if err != nil { return dyn.InvalidValue, err } - out.Set(pk, merged) + out.Set(pk, merged) //nolint:errcheck } else { // Otherwise, just set the value. - out.Set(pk, pv) + out.Set(pk, pv) //nolint:errcheck } } @@ -111,6 +111,7 @@ func mergeSequence(a, b dyn.Value) (dyn.Value, error) { // Preserve the location of the first value. Accumulate the locations of the second value. return dyn.NewValue(out, a.Locations()).AppendLocationsFromValue(b), nil } + func mergePrimitive(a, b dyn.Value) (dyn.Value, error) { // Merging primitive values means using the incoming value. return b.AppendLocationsFromValue(a), nil diff --git a/libs/dyn/merge/merge_test.go b/libs/dyn/merge/merge_test.go index 4a4bf9e6c..bfe772016 100644 --- a/libs/dyn/merge/merge_test.go +++ b/libs/dyn/merge/merge_test.go @@ -75,7 +75,6 @@ func TestMergeMaps(t *testing.T) { assert.Equal(t, l1, out.Get("foo").Location()) assert.Equal(t, l2, out.Get("qux").Location()) } - } func TestMergeMapsNil(t *testing.T) { diff --git a/libs/dyn/merge/override.go b/libs/dyn/merge/override.go index 7a8667cd6..ca62c7305 100644 --- a/libs/dyn/merge/override.go +++ b/libs/dyn/merge/override.go @@ -23,7 +23,7 @@ import ( type OverrideVisitor struct { VisitDelete func(valuePath dyn.Path, left dyn.Value) error VisitInsert func(valuePath dyn.Path, right dyn.Value) (dyn.Value, error) - VisitUpdate func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) + VisitUpdate func(valuePath dyn.Path, left, right dyn.Value) (dyn.Value, error) } var ErrOverrideUndoDelete = errors.New("undo delete operation") @@ -31,11 +31,11 @@ var ErrOverrideUndoDelete = errors.New("undo delete operation") // Override overrides value 'leftRoot' with 'rightRoot', keeping 'location' if values // haven't changed. Preserving 'location' is important to preserve the original source of the value // for error reporting. 
-func Override(leftRoot dyn.Value, rightRoot dyn.Value, visitor OverrideVisitor) (dyn.Value, error) { +func Override(leftRoot, rightRoot dyn.Value, visitor OverrideVisitor) (dyn.Value, error) { return override(dyn.EmptyPath, leftRoot, rightRoot, visitor) } -func override(basePath dyn.Path, left dyn.Value, right dyn.Value, visitor OverrideVisitor) (dyn.Value, error) { +func override(basePath dyn.Path, left, right dyn.Value, visitor OverrideVisitor) (dyn.Value, error) { if left.Kind() != right.Kind() { return visitor.VisitUpdate(basePath, left, right) } @@ -46,7 +46,6 @@ func override(basePath dyn.Path, left dyn.Value, right dyn.Value, visitor Overri switch left.Kind() { case dyn.KindMap: merged, err := overrideMapping(basePath, left.MustMap(), right.MustMap(), visitor) - if err != nil { return dyn.InvalidValue, err } @@ -57,7 +56,6 @@ func override(basePath dyn.Path, left dyn.Value, right dyn.Value, visitor Overri // some sequences are keyed, and we can detect which elements are added/removed/updated, // but we don't have this information merged, err := overrideSequence(basePath, left.MustSequence(), right.MustSequence(), visitor) - if err != nil { return dyn.InvalidValue, err } @@ -107,7 +105,7 @@ func override(basePath dyn.Path, left dyn.Value, right dyn.Value, visitor Overri return dyn.InvalidValue, fmt.Errorf("unexpected kind %s at %s", left.Kind(), basePath.String()) } -func overrideMapping(basePath dyn.Path, leftMapping dyn.Mapping, rightMapping dyn.Mapping, visitor OverrideVisitor) (dyn.Mapping, error) { +func overrideMapping(basePath dyn.Path, leftMapping, rightMapping dyn.Mapping, visitor OverrideVisitor) (dyn.Mapping, error) { out := dyn.NewMapping() for _, leftPair := range leftMapping.Pairs() { @@ -136,14 +134,12 @@ func overrideMapping(basePath dyn.Path, leftMapping dyn.Mapping, rightMapping dy if leftPair, ok := leftMapping.GetPair(rightPair.Key); ok { path := basePath.Append(dyn.Key(rightPair.Key.MustString())) newValue, err := override(path, leftPair.Value, rightPair.Value, visitor) - if err != nil { return dyn.NewMapping(), err } // key was there before, so keep its location err = out.Set(leftPair.Key, newValue) - if err != nil { return dyn.NewMapping(), err } @@ -151,13 +147,11 @@ func overrideMapping(basePath dyn.Path, leftMapping dyn.Mapping, rightMapping dy path := basePath.Append(dyn.Key(rightPair.Key.MustString())) newValue, err := visitor.VisitInsert(path, rightPair.Value) - if err != nil { return dyn.NewMapping(), err } err = out.Set(rightPair.Key, newValue) - if err != nil { return dyn.NewMapping(), err } @@ -167,14 +161,13 @@ func overrideMapping(basePath dyn.Path, leftMapping dyn.Mapping, rightMapping dy return out, nil } -func overrideSequence(basePath dyn.Path, left []dyn.Value, right []dyn.Value, visitor OverrideVisitor) ([]dyn.Value, error) { +func overrideSequence(basePath dyn.Path, left, right []dyn.Value, visitor OverrideVisitor) ([]dyn.Value, error) { minLen := min(len(left), len(right)) var values []dyn.Value for i := 0; i < minLen; i++ { path := basePath.Append(dyn.Index(i)) merged, err := override(path, left[i], right[i], visitor) - if err != nil { return nil, err } @@ -186,7 +179,6 @@ func overrideSequence(basePath dyn.Path, left []dyn.Value, right []dyn.Value, vi for i := minLen; i < len(right); i++ { path := basePath.Append(dyn.Index(i)) newValue, err := visitor.VisitInsert(path, right[i]) - if err != nil { return nil, err } diff --git a/libs/dyn/merge/override_test.go b/libs/dyn/merge/override_test.go index 264c32e5e..ea161d27c 100644 --- 
a/libs/dyn/merge/override_test.go +++ b/libs/dyn/merge/override_test.go @@ -432,10 +432,12 @@ func TestOverride_PreserveMappingKeys(t *testing.T) { rightValueLocation := dyn.Location{File: "right.yml", Line: 3, Column: 1} left := dyn.NewMapping() - left.Set(dyn.NewValue("a", []dyn.Location{leftKeyLocation}), dyn.NewValue(42, []dyn.Location{leftValueLocation})) + err := left.Set(dyn.NewValue("a", []dyn.Location{leftKeyLocation}), dyn.NewValue(42, []dyn.Location{leftValueLocation})) + require.NoError(t, err) right := dyn.NewMapping() - right.Set(dyn.NewValue("a", []dyn.Location{rightKeyLocation}), dyn.NewValue(7, []dyn.Location{rightValueLocation})) + err = right.Set(dyn.NewValue("a", []dyn.Location{rightKeyLocation}), dyn.NewValue(7, []dyn.Location{rightValueLocation})) + require.NoError(t, err) state, visitor := createVisitor(visitorOpts{}) @@ -482,7 +484,7 @@ func createVisitor(opts visitorOpts) (*visitorState, OverrideVisitor) { s := visitorState{} return &s, OverrideVisitor{ - VisitUpdate: func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) { + VisitUpdate: func(valuePath dyn.Path, left, right dyn.Value) (dyn.Value, error) { s.updated = append(s.updated, valuePath.String()) if opts.error != nil { diff --git a/libs/dyn/pattern.go b/libs/dyn/pattern.go index aecdc3ca6..2d2e9cae7 100644 --- a/libs/dyn/pattern.go +++ b/libs/dyn/pattern.go @@ -69,7 +69,7 @@ func (c anyKeyComponent) visit(v Value, prefix Path, suffix Pattern, opts visitO return InvalidValue, err } - m.Set(pk, nv) + m.Set(pk, nv) //nolint:errcheck } return NewValue(m, v.Locations()), nil diff --git a/libs/dyn/value_test.go b/libs/dyn/value_test.go index 6a0a27b8d..86e65858e 100644 --- a/libs/dyn/value_test.go +++ b/libs/dyn/value_test.go @@ -25,11 +25,11 @@ func TestValueAsMap(t *testing.T) { _, ok := zeroValue.AsMap() assert.False(t, ok) - var intValue = dyn.V(1) + intValue := dyn.V(1) _, ok = intValue.AsMap() assert.False(t, ok) - var mapValue = dyn.NewValue( + mapValue := dyn.NewValue( map[string]dyn.Value{ "key": dyn.NewValue( "value", @@ -46,6 +46,6 @@ func TestValueAsMap(t *testing.T) { func TestValueIsValid(t *testing.T) { var zeroValue dyn.Value assert.False(t, zeroValue.IsValid()) - var intValue = dyn.V(1) + intValue := dyn.V(1) assert.True(t, intValue.IsValid()) } diff --git a/libs/dyn/visit.go b/libs/dyn/visit.go index 38adec24f..95515115e 100644 --- a/libs/dyn/visit.go +++ b/libs/dyn/visit.go @@ -122,7 +122,7 @@ func (component pathComponent) visit(v Value, prefix Path, suffix Pattern, opts // Return an updated map value. m = m.Clone() - m.Set(V(component.key), nv) + m.Set(V(component.key), nv) //nolint:errcheck return Value{ v: m, k: KindMap, diff --git a/libs/dyn/visit_map.go b/libs/dyn/visit_map.go index 3f0cded03..db4526038 100644 --- a/libs/dyn/visit_map.go +++ b/libs/dyn/visit_map.go @@ -25,7 +25,7 @@ func Foreach(fn MapFunc) MapFunc { if err != nil { return InvalidValue, err } - m.Set(pk, nv) + m.Set(pk, nv) //nolint:errcheck } return NewValue(m, v.Locations()), nil case KindSequence: diff --git a/libs/dyn/visit_map_test.go b/libs/dyn/visit_map_test.go index 2cea0913b..d62327d6f 100644 --- a/libs/dyn/visit_map_test.go +++ b/libs/dyn/visit_map_test.go @@ -71,7 +71,7 @@ func TestMapFuncOnMap(t *testing.T) { }, vbar.AsAny()) // Return error from map function. 
- var ref = fmt.Errorf("error") + ref := fmt.Errorf("error") verr, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("foo")), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref }) @@ -137,7 +137,7 @@ func TestMapFuncOnSequence(t *testing.T) { assert.Equal(t, []any{42, 45}, v1.AsAny()) // Return error from map function. - var ref = fmt.Errorf("error") + ref := fmt.Errorf("error") verr, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(0)), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref }) @@ -211,7 +211,7 @@ func TestMapForeachOnMapError(t *testing.T) { }) // Check that an error from the map function propagates. - var ref = fmt.Errorf("error") + ref := fmt.Errorf("error") _, err := dyn.Map(vin, ".", dyn.Foreach(func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref })) @@ -255,7 +255,7 @@ func TestMapForeachOnSequenceError(t *testing.T) { }) // Check that an error from the map function propagates. - var ref = fmt.Errorf("error") + ref := fmt.Errorf("error") _, err := dyn.Map(vin, ".", dyn.Foreach(func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref })) diff --git a/libs/dyn/visit_set.go b/libs/dyn/visit_set.go index b086fb8a9..9991d311f 100644 --- a/libs/dyn/visit_set.go +++ b/libs/dyn/visit_set.go @@ -41,7 +41,7 @@ func SetByPath(v Value, p Path, nv Value) (Value, error) { // Return an updated map value. m = m.Clone() - m.Set(V(component.key), nv) + m.Set(V(component.key), nv) //nolint:errcheck return Value{ v: m, k: KindMap, diff --git a/libs/dyn/walk.go b/libs/dyn/walk.go index c51a11e22..b3576e088 100644 --- a/libs/dyn/walk.go +++ b/libs/dyn/walk.go @@ -45,7 +45,7 @@ func walk(v Value, p Path, fn func(p Path, v Value) (Value, error)) (Value, erro if err != nil { return InvalidValue, err } - out.Set(pk, nv) + out.Set(pk, nv) //nolint:errcheck } v.v = out case KindSequence: diff --git a/libs/dyn/yamlloader/loader.go b/libs/dyn/yamlloader/loader.go index a77ee0744..fe58d6dfb 100644 --- a/libs/dyn/yamlloader/loader.go +++ b/libs/dyn/yamlloader/loader.go @@ -129,7 +129,7 @@ func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, erro return dyn.InvalidValue, err } - acc.Set(k, v) + acc.Set(k, v) //nolint:errcheck } if merge == nil { @@ -137,8 +137,8 @@ func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, erro } // Build location for the merge node. - var mloc = d.location(merge) - var merr = errorf(mloc, "map merge requires map or sequence of maps as the value") + mloc := d.location(merge) + merr := errorf(mloc, "map merge requires map or sequence of maps as the value") // Flatten the merge node into a slice of nodes. // It can be either a single node or a sequence of nodes. 
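// Sketch (not from the patch) of why the Mapping.Set call sites above can carry
// //nolint:errcheck: Set only returns an error when the key is not a string, so
// callers that reuse an existing string key, or build one from a string literal
// with dyn.V, cannot hit the error path. The example program below is illustrative.
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn"
)

func main() {
	m := dyn.NewMapping()

	// String key: Set cannot fail, so the error may be safely ignored.
	_ = m.Set(dyn.V("name"), dyn.V("value"))

	// Non-string key: Set reports an error instead of panicking.
	if err := m.Set(dyn.V(42), dyn.V("value")); err != nil {
		fmt.Println(err) // e.g. "key must be a string, got int"
	}
}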
diff --git a/libs/dyn/yamlloader/yaml_spec_test.go b/libs/dyn/yamlloader/yaml_spec_test.go index 2a5ae817f..d9997f702 100644 --- a/libs/dyn/yamlloader/yaml_spec_test.go +++ b/libs/dyn/yamlloader/yaml_spec_test.go @@ -777,7 +777,8 @@ func TestYAMLSpecExample_2_27(t *testing.T) { ), }, []dyn.Location{{File: file, Line: 22, Column: 3}}, - )}, + ), + }, []dyn.Location{{File: file, Line: 18, Column: 1}}, ), "tax": dyn.NewValue( diff --git a/libs/dyn/yamlsaver/saver.go b/libs/dyn/yamlsaver/saver.go index 0fd81d534..7398e2594 100644 --- a/libs/dyn/yamlsaver/saver.go +++ b/libs/dyn/yamlsaver/saver.go @@ -27,7 +27,7 @@ func NewSaverWithStyle(nodesWithStyle map[string]yaml.Style) *saver { } func (s *saver) SaveAsYAML(data any, filename string, force bool) error { - err := os.MkdirAll(filepath.Dir(filename), 0755) + err := os.MkdirAll(filepath.Dir(filename), 0o755) if err != nil { return err } diff --git a/libs/dyn/yamlsaver/saver_test.go b/libs/dyn/yamlsaver/saver_test.go index aa481c20b..89bd5c31e 100644 --- a/libs/dyn/yamlsaver/saver_test.go +++ b/libs/dyn/yamlsaver/saver_test.go @@ -11,7 +11,7 @@ import ( func TestMarshalNilValue(t *testing.T) { s := NewSaver() - var nilValue = dyn.NilValue + nilValue := dyn.NilValue v, err := s.toYamlNode(nilValue) assert.NoError(t, err) assert.Equal(t, "null", v.Value) @@ -19,7 +19,7 @@ func TestMarshalNilValue(t *testing.T) { func TestMarshalIntValue(t *testing.T) { s := NewSaver() - var intValue = dyn.V(1) + intValue := dyn.V(1) v, err := s.toYamlNode(intValue) assert.NoError(t, err) assert.Equal(t, "1", v.Value) @@ -28,7 +28,7 @@ func TestMarshalIntValue(t *testing.T) { func TestMarshalFloatValue(t *testing.T) { s := NewSaver() - var floatValue = dyn.V(1.0) + floatValue := dyn.V(1.0) v, err := s.toYamlNode(floatValue) assert.NoError(t, err) assert.Equal(t, "1", v.Value) @@ -37,7 +37,7 @@ func TestMarshalFloatValue(t *testing.T) { func TestMarshalBoolValue(t *testing.T) { s := NewSaver() - var boolValue = dyn.V(true) + boolValue := dyn.V(true) v, err := s.toYamlNode(boolValue) assert.NoError(t, err) assert.Equal(t, "true", v.Value) @@ -49,7 +49,7 @@ func TestMarshalTimeValue(t *testing.T) { require.NoError(t, err) s := NewSaver() - var timeValue = dyn.V(tm) + timeValue := dyn.V(tm) v, err := s.toYamlNode(timeValue) assert.NoError(t, err) assert.Equal(t, "1970-01-01", v.Value) @@ -58,7 +58,7 @@ func TestMarshalTimeValue(t *testing.T) { func TestMarshalSequenceValue(t *testing.T) { s := NewSaver() - var sequenceValue = dyn.NewValue( + sequenceValue := dyn.NewValue( []dyn.Value{ dyn.NewValue("value1", []dyn.Location{{File: "file", Line: 1, Column: 2}}), dyn.NewValue("value2", []dyn.Location{{File: "file", Line: 2, Column: 2}}), @@ -74,7 +74,7 @@ func TestMarshalSequenceValue(t *testing.T) { func TestMarshalStringValue(t *testing.T) { s := NewSaver() - var stringValue = dyn.V("value") + stringValue := dyn.V("value") v, err := s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "value", v.Value) @@ -83,7 +83,7 @@ func TestMarshalStringValue(t *testing.T) { func TestMarshalMapValue(t *testing.T) { s := NewSaver() - var mapValue = dyn.NewValue( + mapValue := dyn.NewValue( map[string]dyn.Value{ "key3": dyn.NewValue("value3", []dyn.Location{{File: "file", Line: 3, Column: 2}}), "key2": dyn.NewValue("value2", []dyn.Location{{File: "file", Line: 2, Column: 2}}), @@ -107,7 +107,7 @@ func TestMarshalMapValue(t *testing.T) { func TestMarshalNestedValues(t *testing.T) { s := NewSaver() - var mapValue = dyn.NewValue( + mapValue := dyn.NewValue( 
map[string]dyn.Value{ "key1": dyn.NewValue( map[string]dyn.Value{ @@ -129,14 +129,14 @@ func TestMarshalNestedValues(t *testing.T) { func TestMarshalHexadecimalValueIsQuoted(t *testing.T) { s := NewSaver() - var hexValue = dyn.V(0x123) + hexValue := dyn.V(0x123) v, err := s.toYamlNode(hexValue) assert.NoError(t, err) assert.Equal(t, "291", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.V("0x123") + stringValue := dyn.V("0x123") v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "0x123", v.Value) @@ -146,14 +146,14 @@ func TestMarshalHexadecimalValueIsQuoted(t *testing.T) { func TestMarshalBinaryValueIsQuoted(t *testing.T) { s := NewSaver() - var binaryValue = dyn.V(0b101) + binaryValue := dyn.V(0b101) v, err := s.toYamlNode(binaryValue) assert.NoError(t, err) assert.Equal(t, "5", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.V("0b101") + stringValue := dyn.V("0b101") v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "0b101", v.Value) @@ -163,14 +163,14 @@ func TestMarshalBinaryValueIsQuoted(t *testing.T) { func TestMarshalOctalValueIsQuoted(t *testing.T) { s := NewSaver() - var octalValue = dyn.V(0123) + octalValue := dyn.V(0o123) v, err := s.toYamlNode(octalValue) assert.NoError(t, err) assert.Equal(t, "83", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.V("0123") + stringValue := dyn.V("0123") v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "0123", v.Value) @@ -180,14 +180,14 @@ func TestMarshalOctalValueIsQuoted(t *testing.T) { func TestMarshalFloatValueIsQuoted(t *testing.T) { s := NewSaver() - var floatValue = dyn.V(1.0) + floatValue := dyn.V(1.0) v, err := s.toYamlNode(floatValue) assert.NoError(t, err) assert.Equal(t, "1", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.V("1.0") + stringValue := dyn.V("1.0") v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "1.0", v.Value) @@ -197,14 +197,14 @@ func TestMarshalFloatValueIsQuoted(t *testing.T) { func TestMarshalBoolValueIsQuoted(t *testing.T) { s := NewSaver() - var boolValue = dyn.V(true) + boolValue := dyn.V(true) v, err := s.toYamlNode(boolValue) assert.NoError(t, err) assert.Equal(t, "true", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.V("true") + stringValue := dyn.V("true") v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "true", v.Value) @@ -217,7 +217,7 @@ func TestCustomStylingWithNestedMap(t *testing.T) { "styled": yaml.DoubleQuotedStyle, }) - var styledMap = dyn.NewValue( + styledMap := dyn.NewValue( map[string]dyn.Value{ "key1": dyn.NewValue("value1", []dyn.Location{{File: "file", Line: 1, Column: 2}}), "key2": dyn.NewValue("value2", []dyn.Location{{File: "file", Line: 2, Column: 2}}), @@ -225,7 +225,7 @@ func TestCustomStylingWithNestedMap(t *testing.T) { []dyn.Location{{File: "file", Line: -2, Column: 2}}, ) - var unstyledMap = dyn.NewValue( + unstyledMap := dyn.NewValue( map[string]dyn.Value{ "key3": dyn.NewValue("value3", []dyn.Location{{File: "file", Line: 1, Column: 2}}), "key4": dyn.NewValue("value4", []dyn.Location{{File: "file", Line: 2, Column: 2}}), @@ -233,7 +233,7 @@ func TestCustomStylingWithNestedMap(t *testing.T) { []dyn.Location{{File: "file", Line: -1, 
Column: 2}}, ) - var val = dyn.NewValue( + val := dyn.NewValue( map[string]dyn.Value{ "styled": styledMap, "unstyled": unstyledMap, diff --git a/libs/env/loader.go b/libs/env/loader.go index f441ffa15..74c54cee8 100644 --- a/libs/env/loader.go +++ b/libs/env/loader.go @@ -43,7 +43,9 @@ func (le *configLoader) Configure(cfg *config.Config) error { if v == "" { continue } - a.Set(cfg, v) + if err := a.Set(cfg, v); err != nil { + return err + } } } return nil diff --git a/libs/exec/exec.go b/libs/exec/exec.go index 8e4633271..466117e60 100644 --- a/libs/exec/exec.go +++ b/libs/exec/exec.go @@ -10,9 +10,11 @@ import ( type ExecutableType string -const BashExecutable ExecutableType = `bash` -const ShExecutable ExecutableType = `sh` -const CmdExecutable ExecutableType = `cmd` +const ( + BashExecutable ExecutableType = `bash` + ShExecutable ExecutableType = `sh` + CmdExecutable ExecutableType = `cmd` +) var finders map[ExecutableType](func() (shell, error)) = map[ExecutableType](func() (shell, error)){ BashExecutable: newBashShell, diff --git a/libs/exec/exec_test.go b/libs/exec/exec_test.go index ad54601d0..e75c158bd 100644 --- a/libs/exec/exec_test.go +++ b/libs/exec/exec_test.go @@ -12,6 +12,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestExecutorWithSimpleInput(t *testing.T) { @@ -86,9 +87,11 @@ func testExecutorWithShell(t *testing.T, shell string) { tmpDir := t.TempDir() t.Setenv("PATH", tmpDir) if runtime.GOOS == "windows" { - os.Symlink(p, fmt.Sprintf("%s/%s.exe", tmpDir, shell)) + err = os.Symlink(p, fmt.Sprintf("%s/%s.exe", tmpDir, shell)) + require.NoError(t, err) } else { - os.Symlink(p, fmt.Sprintf("%s/%s", tmpDir, shell)) + err = os.Symlink(p, fmt.Sprintf("%s/%s", tmpDir, shell)) + require.NoError(t, err) } executor, err := NewCommandExecutor(".") diff --git a/libs/exec/shell.go b/libs/exec/shell.go index f5d176896..ee29eac8a 100644 --- a/libs/exec/shell.go +++ b/libs/exec/shell.go @@ -36,7 +36,7 @@ func findShell() (shell, error) { return nil, errors.New("no shell found") } -func createTempScript(command string, extension string) (string, error) { +func createTempScript(command, extension string) (string, error) { file, err := os.CreateTemp(os.TempDir(), "cli-exec*"+extension) if err != nil { return "", err diff --git a/libs/filer/filer.go b/libs/filer/filer.go index b5be4c3c2..83dc560cb 100644 --- a/libs/filer/filer.go +++ b/libs/filer/filer.go @@ -103,8 +103,7 @@ func (err DirectoryNotEmptyError) Is(other error) bool { return other == fs.ErrInvalid } -type CannotDeleteRootError struct { -} +type CannotDeleteRootError struct{} func (err CannotDeleteRootError) Error() string { return "unable to delete filer root" diff --git a/libs/filer/local_client.go b/libs/filer/local_client.go index 8b25345fc..385aa6924 100644 --- a/libs/filer/local_client.go +++ b/libs/filer/local_client.go @@ -29,7 +29,7 @@ func (w *LocalClient) Write(ctx context.Context, name string, reader io.Reader, } // Retrieve permission mask from the [WriteMode], if present. - perm := fs.FileMode(0644) + perm := fs.FileMode(0o644) for _, m := range mode { bits := m & writeModePerm if bits != 0 { @@ -47,7 +47,7 @@ func (w *LocalClient) Write(ctx context.Context, name string, reader io.Reader, f, err := os.OpenFile(absPath, flags, perm) if errors.Is(err, fs.ErrNotExist) && slices.Contains(mode, CreateParentDirectories) { // Create parent directories if they don't exist. 
- err = os.MkdirAll(filepath.Dir(absPath), 0755) + err = os.MkdirAll(filepath.Dir(absPath), 0o755) if err != nil { return err } @@ -73,7 +73,6 @@ func (w *LocalClient) Write(ctx context.Context, name string, reader io.Reader, } return err - } func (w *LocalClient) Read(ctx context.Context, name string) (io.ReadCloser, error) { @@ -159,7 +158,7 @@ func (w *LocalClient) Mkdir(ctx context.Context, name string) error { return err } - return os.MkdirAll(dirPath, 0755) + return os.MkdirAll(dirPath, 0o755) } func (w *LocalClient) Stat(ctx context.Context, name string) (fs.FileInfo, error) { diff --git a/libs/filer/slice_test.go b/libs/filer/slice_test.go index 21d783483..2bdb3f7f5 100644 --- a/libs/filer/slice_test.go +++ b/libs/filer/slice_test.go @@ -12,11 +12,10 @@ func TestSliceWithout(t *testing.T) { assert.Equal(t, []int{2, 3}, sliceWithout([]int{1, 2, 3}, 1)) assert.Equal(t, []int{1, 3}, sliceWithout([]int{1, 2, 3}, 2)) assert.Equal(t, []int{1, 2}, sliceWithout([]int{1, 2, 3}, 3)) - } func TestSliceWithoutReturnsClone(t *testing.T) { - var ints = []int{1, 2, 3} + ints := []int{1, 2, 3} assert.Equal(t, []int{2, 3}, sliceWithout(ints, 1)) assert.Equal(t, []int{1, 2, 3}, ints) } diff --git a/libs/filer/workspace_files_extensions_client.go b/libs/filer/workspace_files_extensions_client.go index 2a6052091..9ee2722e1 100644 --- a/libs/filer/workspace_files_extensions_client.go +++ b/libs/filer/workspace_files_extensions_client.go @@ -52,7 +52,8 @@ func (w *workspaceFilesExtensionsClient) getNotebookStatByNameWithExt(ctx contex notebook.ExtensionR, notebook.ExtensionScala, notebook.ExtensionSql, - notebook.ExtensionJupyter}, ext) { + notebook.ExtensionJupyter, + }, ext) { return nil, nil } diff --git a/libs/filer/workspace_files_extensions_client_test.go b/libs/filer/workspace_files_extensions_client_test.go index 10c176b31..10a2bebf0 100644 --- a/libs/filer/workspace_files_extensions_client_test.go +++ b/libs/filer/workspace_files_extensions_client_test.go @@ -17,8 +17,9 @@ type mockApiClient struct { } func (m *mockApiClient) Do(ctx context.Context, method, path string, - headers map[string]string, request any, response any, - visitors ...func(*http.Request) error) error { + headers map[string]string, request, response any, + visitors ...func(*http.Request) error, +) error { args := m.Called(ctx, method, path, headers, request, response, visitors) // Set the http response from a value provided in the mock call. 
diff --git a/libs/flags/json_flag_test.go b/libs/flags/json_flag_test.go index 77530086a..b31324011 100644 --- a/libs/flags/json_flag_test.go +++ b/libs/flags/json_flag_test.go @@ -57,12 +57,13 @@ func TestJsonFlagFile(t *testing.T) { var request any var fpath string - var payload = []byte(`{"foo": "bar"}`) + payload := []byte(`{"foo": "bar"}`) { f, err := os.Create(path.Join(t.TempDir(), "file")) require.NoError(t, err) - f.Write(payload) + _, err = f.Write(payload) + require.NoError(t, err) f.Close() fpath = f.Name() } diff --git a/libs/flags/log_file_flag.go b/libs/flags/log_file_flag.go index 9e60353f0..d2fe51d91 100644 --- a/libs/flags/log_file_flag.go +++ b/libs/flags/log_file_flag.go @@ -48,7 +48,7 @@ func (f *realLogFile) Writer() io.Writer { } func (f *realLogFile) Open() error { - file, err := os.OpenFile(f.s, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600) + file, err := os.OpenFile(f.s, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o600) if err != nil { return err } diff --git a/libs/flags/yaml_flag.go b/libs/flags/yaml_flag.go deleted file mode 100644 index 95cc9b4be..000000000 --- a/libs/flags/yaml_flag.go +++ /dev/null @@ -1,42 +0,0 @@ -package flags - -import ( - "fmt" - "os" - - "github.com/ghodss/yaml" -) - -type YamlFlag struct { - raw []byte -} - -func (y *YamlFlag) String() string { - return fmt.Sprintf("YAML (%d bytes)", len(y.raw)) -} - -// TODO: Command.MarkFlagFilename() -func (y *YamlFlag) Set(v string) error { - // Load request from file if it starts with '@' (like curl). - if v[0] != '@' { - y.raw = []byte(v) - return nil - } - buf, err := os.ReadFile(v[1:]) - if err != nil { - return fmt.Errorf("read %s: %w", v, err) - } - y.raw = buf - return nil -} - -func (y *YamlFlag) Unmarshal(v any) error { - if y.raw == nil { - return nil - } - return yaml.Unmarshal(y.raw, v) -} - -func (y *YamlFlag) Type() string { - return "YAML" -} diff --git a/libs/folders/folders.go b/libs/folders/folders.go index c83c711d3..bbabc588c 100644 --- a/libs/folders/folders.go +++ b/libs/folders/folders.go @@ -8,7 +8,11 @@ import ( // FindDirWithLeaf returns the first directory that holds `leaf`, // traversing up to the root of the filesystem, starting at `dir`. -func FindDirWithLeaf(dir string, leaf string) (string, error) { +func FindDirWithLeaf(dir, leaf string) (string, error) { + dir, err := filepath.Abs(dir) + if err != nil { + return "", err + } for { _, err := os.Stat(filepath.Join(dir, leaf)) diff --git a/libs/git/config.go b/libs/git/config.go index fafd81bd6..f7ff057e1 100644 --- a/libs/git/config.go +++ b/libs/git/config.go @@ -155,8 +155,8 @@ func globalGitConfig() (*config, error) { // > are missing or unreadable they will be ignored. // // We therefore ignore the error return value for the calls below. 
- config.loadFile(vfs.MustNew(xdgConfigHome), "git/config") - config.loadFile(vfs.MustNew(config.home), ".gitconfig") + _ = config.loadFile(vfs.MustNew(xdgConfigHome), "git/config") + _ = config.loadFile(vfs.MustNew(config.home), ".gitconfig") return config, nil } diff --git a/libs/git/config_test.go b/libs/git/config_test.go index 3e6edf765..73f3431c9 100644 --- a/libs/git/config_test.go +++ b/libs/git/config_test.go @@ -113,7 +113,7 @@ func (h *testCoreExcludesHelper) initialize(t *testing.T) { t.Setenv("XDG_CONFIG_HOME", h.xdgConfigHome) xdgConfigHomeGit := filepath.Join(h.xdgConfigHome, "git") - err := os.MkdirAll(xdgConfigHomeGit, 0755) + err := os.MkdirAll(xdgConfigHomeGit, 0o755) require.NoError(t, err) } @@ -124,7 +124,7 @@ func (h *testCoreExcludesHelper) coreExcludesFile() (string, error) { } func (h *testCoreExcludesHelper) writeConfig(path, contents string) { - err := os.WriteFile(path, []byte(contents), 0644) + err := os.WriteFile(path, []byte(contents), 0o644) require.NoError(h, err) } diff --git a/libs/git/fileset_test.go b/libs/git/fileset_test.go index f4fd931fd..6d239edf5 100644 --- a/libs/git/fileset_test.go +++ b/libs/git/fileset_test.go @@ -56,7 +56,8 @@ func TestFileSetAddsCacheDirToGitIgnore(t *testing.T) { projectDir := t.TempDir() fileSet, err := NewFileSetAtRoot(vfs.MustNew(projectDir)) require.NoError(t, err) - fileSet.EnsureValidGitIgnoreExists() + err = fileSet.EnsureValidGitIgnoreExists() + require.NoError(t, err) gitIgnorePath := filepath.Join(projectDir, ".gitignore") assert.FileExists(t, gitIgnorePath) @@ -74,7 +75,8 @@ func TestFileSetDoesNotCacheDirToGitIgnoreIfAlreadyPresent(t *testing.T) { err = os.WriteFile(gitIgnorePath, []byte(".databricks"), 0o644) require.NoError(t, err) - fileSet.EnsureValidGitIgnoreExists() + err = fileSet.EnsureValidGitIgnoreExists() + require.NoError(t, err) b, err := os.ReadFile(gitIgnorePath) require.NoError(t, err) diff --git a/libs/git/ignore_test.go b/libs/git/ignore_test.go index 057c0cb2e..9e2713608 100644 --- a/libs/git/ignore_test.go +++ b/libs/git/ignore_test.go @@ -48,7 +48,7 @@ func TestIgnoreFileTaint(t *testing.T) { assert.False(t, ign) // Now create the .gitignore file. - err = os.WriteFile(gitIgnorePath, []byte("hello"), 0644) + err = os.WriteFile(gitIgnorePath, []byte("hello"), 0o644) require.NoError(t, err) // Verify that the match still doesn't happen (no spontaneous reload). 
diff --git a/libs/git/info.go b/libs/git/info.go index 13c298113..46e57be48 100644 --- a/libs/git/info.go +++ b/libs/git/info.go @@ -2,15 +2,12 @@ package git import ( "context" - "errors" - "io/fs" "net/http" - "os" "path" - "path/filepath" "strings" "github.com/databricks/cli/libs/dbr" + "github.com/databricks/cli/libs/folders" "github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/vfs" "github.com/databricks/databricks-sdk-go" @@ -75,7 +72,6 @@ func fetchRepositoryInfoAPI(ctx context.Context, path string, w *databricks.Work }, &response, ) - if err != nil { return result, err } @@ -105,7 +101,7 @@ func ensureWorkspacePrefix(p string) string { func fetchRepositoryInfoDotGit(ctx context.Context, path string) (RepositoryInfo, error) { result := RepositoryInfo{} - rootDir, err := findLeafInTree(path, GitDirectoryName) + rootDir, err := folders.FindDirWithLeaf(path, GitDirectoryName) if rootDir == "" { return result, err } @@ -134,28 +130,3 @@ func fetchRepositoryInfoDotGit(ctx context.Context, path string) (RepositoryInfo return result, nil } - -func findLeafInTree(p string, leafName string) (string, error) { - var err error - for i := 0; i < 10000; i++ { - _, err = os.Stat(filepath.Join(p, leafName)) - - if err == nil { - // Found [leafName] in p - return p, nil - } - - // ErrNotExist means we continue traversal up the tree. - if errors.Is(err, fs.ErrNotExist) { - parent := filepath.Dir(p) - if parent == p { - return "", nil - } - p = parent - continue - } - break - } - - return "", err -} diff --git a/libs/git/reference.go b/libs/git/reference.go index 2165a9cda..e1126d4f2 100644 --- a/libs/git/reference.go +++ b/libs/git/reference.go @@ -12,8 +12,10 @@ import ( type ReferenceType string -var ErrNotAReferencePointer = fmt.Errorf("HEAD does not point to another reference") -var ErrNotABranch = fmt.Errorf("HEAD is not a reference to a git branch") +var ( + ErrNotAReferencePointer = fmt.Errorf("HEAD does not point to another reference") + ErrNotABranch = fmt.Errorf("HEAD is not a reference to a git branch") +) const ( // pointer to a secondary reference file path containing sha-1 object ID. 
@@ -30,8 +32,10 @@ type Reference struct { Content string } -const ReferencePrefix = "ref: " -const HeadPathPrefix = "refs/heads/" +const ( + ReferencePrefix = "ref: " + HeadPathPrefix = "refs/heads/" +) // asserts if a string is a 40 character hexadecimal encoded string func isSHA1(s string) bool { diff --git a/libs/git/reference_test.go b/libs/git/reference_test.go index 194d79333..bfa0e50e5 100644 --- a/libs/git/reference_test.go +++ b/libs/git/reference_test.go @@ -54,7 +54,8 @@ func TestReferenceLoadingForObjectID(t *testing.T) { f, err := os.Create(filepath.Join(tmp, "HEAD")) require.NoError(t, err) defer f.Close() - f.WriteString(strings.Repeat("e", 40) + "\r\n") + _, err = f.WriteString(strings.Repeat("e", 40) + "\r\n") + require.NoError(t, err) ref, err := LoadReferenceFile(vfs.MustNew(tmp), "HEAD") assert.NoError(t, err) @@ -67,7 +68,8 @@ func TestReferenceLoadingForReference(t *testing.T) { f, err := os.OpenFile(filepath.Join(tmp, "HEAD"), os.O_CREATE|os.O_WRONLY, os.ModePerm) require.NoError(t, err) defer f.Close() - f.WriteString("ref: refs/heads/foo\n") + _, err = f.WriteString("ref: refs/heads/foo\n") + require.NoError(t, err) ref, err := LoadReferenceFile(vfs.MustNew(tmp), "HEAD") assert.NoError(t, err) @@ -80,7 +82,8 @@ func TestReferenceLoadingFailsForInvalidContent(t *testing.T) { f, err := os.OpenFile(filepath.Join(tmp, "HEAD"), os.O_CREATE|os.O_WRONLY, os.ModePerm) require.NoError(t, err) defer f.Close() - f.WriteString("abc") + _, err = f.WriteString("abc") + require.NoError(t, err) _, err = LoadReferenceFile(vfs.MustNew(tmp), "HEAD") assert.ErrorContains(t, err, "unknown format for git HEAD") diff --git a/libs/git/repository_test.go b/libs/git/repository_test.go index 93d9a03dc..857df65a9 100644 --- a/libs/git/repository_test.go +++ b/libs/git/repository_test.go @@ -27,7 +27,7 @@ func newTestRepository(t *testing.T) *testRepository { require.NoError(t, err) defer f1.Close() - f1.WriteString( + _, err = f1.WriteString( `[core] repositoryformatversion = 0 filemode = true @@ -36,6 +36,7 @@ func newTestRepository(t *testing.T) *testRepository { ignorecase = true precomposeunicode = true `) + require.NoError(t, err) f2, err := os.Create(filepath.Join(tmp, ".git", "HEAD")) require.NoError(t, err) @@ -62,7 +63,7 @@ func (testRepo *testRepository) checkoutCommit(commitId string) { require.NoError(testRepo.t, err) } -func (testRepo *testRepository) addBranch(name string, latestCommit string) { +func (testRepo *testRepository) addBranch(name, latestCommit string) { // create dir for branch head reference branchDir := filepath.Join(testRepo.r.Root(), ".git", "refs", "heads") err := os.MkdirAll(branchDir, os.ModePerm) diff --git a/libs/git/view.go b/libs/git/view.go index 2eaba1f8b..db22dfc5d 100644 --- a/libs/git/view.go +++ b/libs/git/view.go @@ -113,7 +113,7 @@ func (v *View) EnsureValidGitIgnoreExists() error { // Create .gitignore with .databricks entry gitIgnorePath := filepath.Join(v.repo.Root(), v.targetPath, ".gitignore") - file, err := os.OpenFile(gitIgnorePath, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644) + file, err := os.OpenFile(gitIgnorePath, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0o644) if err != nil { return err } diff --git a/libs/git/view_test.go b/libs/git/view_test.go index 06f6f9419..96881fdee 100644 --- a/libs/git/view_test.go +++ b/libs/git/view_test.go @@ -20,7 +20,7 @@ func copyTestdata(t *testing.T, name string) string { require.NoError(t, err) if d.IsDir() { - err := os.MkdirAll(filepath.Join(tempDir, path), 0755) + err := 
os.MkdirAll(filepath.Join(tempDir, path), 0o755) require.NoError(t, err) return nil } @@ -46,7 +46,7 @@ func createFakeRepo(t *testing.T, testdataName string) string { absPath := copyTestdata(t, testdataName) // Add .git directory to make it look like a Git repository. - err := os.Mkdir(filepath.Join(absPath, ".git"), 0755) + err := os.Mkdir(filepath.Join(absPath, ".git"), 0o755) require.NoError(t, err) return absPath } diff --git a/libs/jsonschema/extension.go b/libs/jsonschema/extension.go index 3e32caf1a..9badf86a5 100644 --- a/libs/jsonschema/extension.go +++ b/libs/jsonschema/extension.go @@ -34,4 +34,10 @@ type Extension struct { // Version of the schema. This is used to determine if the schema is // compatible with the current CLI version. Version *int `json:"version,omitempty"` + + // This field is not in JSON schema spec, but it is supported in VSCode and in the Databricks Workspace + // It is used to provide a rich description of the field in the hover tooltip. + // https://code.visualstudio.com/docs/languages/json#_use-rich-formatting-in-hovers + // Also it can be used in documentation generation. + MarkdownDescription string `json:"markdownDescription,omitempty"` } diff --git a/libs/jsonschema/from_type_test.go b/libs/jsonschema/from_type_test.go index 0ddb1011a..cdfdcfd10 100644 --- a/libs/jsonschema/from_type_test.go +++ b/libs/jsonschema/from_type_test.go @@ -403,7 +403,8 @@ func TestFromTypeError(t *testing.T) { // Maps with non-string keys should panic. type mapOfInts map[int]int assert.PanicsWithValue(t, "found map with non-string key: int", func() { - FromType(reflect.TypeOf(mapOfInts{}), nil) + _, err := FromType(reflect.TypeOf(mapOfInts{}), nil) + require.NoError(t, err) }) // Unsupported types should return an error. diff --git a/libs/jsonschema/schema.go b/libs/jsonschema/schema.go index b9c3fb08c..e63dde359 100644 --- a/libs/jsonschema/schema.go +++ b/libs/jsonschema/schema.go @@ -69,6 +69,13 @@ type Schema struct { // Schema that must match any of the schemas in the array AnyOf []Schema `json:"anyOf,omitempty"` + + // Schema that must match one of the schemas in the array + OneOf []Schema `json:"oneOf,omitempty"` + + // Title of the object, rendered as inline documentation in the IDE. + // https://json-schema.org/understanding-json-schema/reference/annotations + Title string `json:"title,omitempty"` } // Default value defined in a JSON Schema, represented as a string. diff --git a/libs/jsonschema/validate_type.go b/libs/jsonschema/validate_type.go index 125d6b20b..9f70498ba 100644 --- a/libs/jsonschema/validate_type.go +++ b/libs/jsonschema/validate_type.go @@ -39,9 +39,11 @@ func validateNumber(v any) error { } func validateInteger(v any) error { - if !slices.Contains([]reflect.Kind{reflect.Int, reflect.Int8, reflect.Int16, + if !slices.Contains([]reflect.Kind{ + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, - reflect.Uint32, reflect.Uint64}, + reflect.Uint32, reflect.Uint64, + }, reflect.TypeOf(v).Kind()) { return fmt.Errorf("expected type integer, but value is %#v", v) } diff --git a/libs/locker/locker.go b/libs/locker/locker.go index b0d65c42e..eb59c9f74 100644 --- a/libs/locker/locker.go +++ b/libs/locker/locker.go @@ -140,7 +140,7 @@ func (locker *Locker) Lock(ctx context.Context, isForced bool) error { return err } - var modes = []filer.WriteMode{ + modes := []filer.WriteMode{ // Always create parent directory if it doesn't yet exist. 
filer.CreateParentDirectories, } @@ -196,7 +196,7 @@ func (locker *Locker) Unlock(ctx context.Context, opts ...UnlockOption) error { return nil } -func CreateLocker(user string, targetDir string, w *databricks.WorkspaceClient) (*Locker, error) { +func CreateLocker(user, targetDir string, w *databricks.WorkspaceClient) (*Locker, error) { filer, err := filer.NewWorkspaceFilesClient(w, targetDir) if err != nil { return nil, err diff --git a/libs/log/context.go b/libs/log/context.go index d9e31d116..5e3e8ccb6 100644 --- a/libs/log/context.go +++ b/libs/log/context.go @@ -2,7 +2,6 @@ package log import ( "context" - "log/slog" ) diff --git a/libs/log/logger.go b/libs/log/logger.go index 43a30e92b..c1d307c89 100644 --- a/libs/log/logger.go +++ b/libs/log/logger.go @@ -3,10 +3,9 @@ package log import ( "context" "fmt" + "log/slog" "runtime" "time" - - "log/slog" ) // GetLogger returns either the logger configured on the context, @@ -31,6 +30,51 @@ func log(logger *slog.Logger, ctx context.Context, level slog.Level, msg string) _ = logger.Handler().Handle(ctx, r) } +// Trace logs a string using the context-local or global logger. +func Trace(ctx context.Context, msg string) { + logger := GetLogger(ctx) + if !logger.Enabled(ctx, LevelTrace) { + return + } + log(logger, ctx, LevelTrace, msg) +} + +// Debug logs a string using the context-local or global logger. +func Debug(ctx context.Context, msg string) { + logger := GetLogger(ctx) + if !logger.Enabled(ctx, LevelDebug) { + return + } + log(logger, ctx, LevelDebug, msg) +} + +// Info logs a string using the context-local or global logger. +func Info(ctx context.Context, msg string) { + logger := GetLogger(ctx) + if !logger.Enabled(ctx, LevelInfo) { + return + } + log(logger, ctx, LevelInfo, msg) +} + +// Warn logs a string using the context-local or global logger. +func Warn(ctx context.Context, msg string) { + logger := GetLogger(ctx) + if !logger.Enabled(ctx, LevelWarn) { + return + } + log(logger, ctx, LevelWarn, msg) +} + +// Error logs a string using the context-local or global logger. +func Error(ctx context.Context, msg string) { + logger := GetLogger(ctx) + if !logger.Enabled(ctx, LevelError) { + return + } + log(logger, ctx, LevelError, msg) +} + // Tracef logs a formatted string using the context-local or global logger. func Tracef(ctx context.Context, format string, v ...any) { logger := GetLogger(ctx) diff --git a/libs/log/sdk.go b/libs/log/sdk.go index e1b1ffed4..086f80f50 100644 --- a/libs/log/sdk.go +++ b/libs/log/sdk.go @@ -3,11 +3,10 @@ package log import ( "context" "fmt" + "log/slog" "runtime" "time" - "log/slog" - sdk "github.com/databricks/databricks-sdk-go/logger" ) diff --git a/libs/notebook/detect.go b/libs/notebook/detect.go index cd8680bfa..40c850945 100644 --- a/libs/notebook/detect.go +++ b/libs/notebook/detect.go @@ -46,7 +46,7 @@ func (f file) close() error { func (f file) readHeader() (string, error) { // Scan header line with some padding. 
- var buf = make([]byte, headerLength) + buf := make([]byte, headerLength) n, err := f.f.Read([]byte(buf)) if err != nil && err != io.EOF { return "", err diff --git a/libs/notebook/detect_jupyter_test.go b/libs/notebook/detect_jupyter_test.go index 4ff2aeff6..af29a2214 100644 --- a/libs/notebook/detect_jupyter_test.go +++ b/libs/notebook/detect_jupyter_test.go @@ -41,7 +41,7 @@ func TestDetectJupyterInvalidJSON(t *testing.T) { dir := t.TempDir() path := filepath.Join(dir, "file.ipynb") buf := make([]byte, 128) - err := os.WriteFile(path, buf, 0644) + err := os.WriteFile(path, buf, 0o644) require.NoError(t, err) // Garbage contents means not a notebook. @@ -55,7 +55,7 @@ func TestDetectJupyterNoCells(t *testing.T) { dir := t.TempDir() path := filepath.Join(dir, "file.ipynb") buf := []byte("{}") - err := os.WriteFile(path, buf, 0644) + err := os.WriteFile(path, buf, 0o644) require.NoError(t, err) // Garbage contents means not a notebook. @@ -69,7 +69,7 @@ func TestDetectJupyterOldVersion(t *testing.T) { dir := t.TempDir() path := filepath.Join(dir, "file.ipynb") buf := []byte(`{ "cells": [], "metadata": {}, "nbformat": 3 }`) - err := os.WriteFile(path, buf, 0644) + err := os.WriteFile(path, buf, 0o644) require.NoError(t, err) // Garbage contents means not a notebook. diff --git a/libs/notebook/detect_test.go b/libs/notebook/detect_test.go index 786c7e394..4ede7bf9b 100644 --- a/libs/notebook/detect_test.go +++ b/libs/notebook/detect_test.go @@ -78,7 +78,7 @@ func TestDetectEmptyFile(t *testing.T) { // Create empty file. dir := t.TempDir() path := filepath.Join(dir, "file.py") - err := os.WriteFile(path, nil, 0644) + err := os.WriteFile(path, nil, 0o644) require.NoError(t, err) // No contents means not a notebook. @@ -92,7 +92,7 @@ func TestDetectFileWithLongHeader(t *testing.T) { dir := t.TempDir() path := filepath.Join(dir, "file.py") buf := make([]byte, 128*1024) - err := os.WriteFile(path, buf, 0644) + err := os.WriteFile(path, buf, 0o644) require.NoError(t, err) // Garbage contents means not a notebook. 
diff --git a/libs/process/stub.go b/libs/process/stub.go index 8472f65d5..8ab6fd705 100644 --- a/libs/process/stub.go +++ b/libs/process/stub.go @@ -148,13 +148,20 @@ func (s *processStub) run(cmd *exec.Cmd) error { if !re.MatchString(norm) { continue } + err := resp.err if resp.stdout != "" { - cmd.Stdout.Write([]byte(resp.stdout)) + _, err1 := cmd.Stdout.Write([]byte(resp.stdout)) + if err == nil { + err = err1 + } } if resp.stderr != "" { - cmd.Stderr.Write([]byte(resp.stderr)) + _, err1 := cmd.Stderr.Write([]byte(resp.stderr)) + if err == nil { + err = err1 + } } - return resp.err + return err } if s.callback != nil { return s.callback(cmd) @@ -163,8 +170,12 @@ func (s *processStub) run(cmd *exec.Cmd) error { if s.reponseStub == zeroStub { return fmt.Errorf("no default process stub") } + err := s.reponseStub.err if s.reponseStub.stdout != "" { - cmd.Stdout.Write([]byte(s.reponseStub.stdout)) + _, err1 := cmd.Stdout.Write([]byte(s.reponseStub.stdout)) + if err == nil { + err = err1 + } } - return s.reponseStub.err + return err } diff --git a/libs/process/stub_test.go b/libs/process/stub_test.go index 65f59f817..81afa3a89 100644 --- a/libs/process/stub_test.go +++ b/libs/process/stub_test.go @@ -43,8 +43,14 @@ func TestStubCallback(t *testing.T) { ctx := context.Background() ctx, stub := process.WithStub(ctx) stub.WithCallback(func(cmd *exec.Cmd) error { - cmd.Stderr.Write([]byte("something...")) - cmd.Stdout.Write([]byte("else...")) + _, err := cmd.Stderr.Write([]byte("something...")) + if err != nil { + return err + } + _, err = cmd.Stdout.Write([]byte("else...")) + if err != nil { + return err + } return fmt.Errorf("yep") }) diff --git a/libs/python/detect.go b/libs/python/detect.go index 8fcc7cd9c..e86d9d621 100644 --- a/libs/python/detect.go +++ b/libs/python/detect.go @@ -11,6 +11,19 @@ import ( "runtime" ) +// GetExecutable gets appropriate python binary name for the platform +func GetExecutable() string { + // On Windows when virtualenv is created, the /Scripts directory + // contains python.exe but no python3.exe. + // Most installers (e.g. the ones from python.org) only install python.exe and not python3.exe + + if runtime.GOOS == "windows" { + return "python" + } else { + return "python3" + } +} + // DetectExecutable looks up the path to the python3 executable from the PATH // environment variable. // @@ -25,7 +38,9 @@ func DetectExecutable(ctx context.Context) (string, error) { // the parent directory tree. 
// // See https://github.com/pyenv/pyenv#understanding-python-version-selection - out, err := exec.LookPath("python3") + + out, err := exec.LookPath(GetExecutable()) + // most of the OS'es have python3 in $PATH, but for those which don't, // we perform the latest version lookup if err != nil && !errors.Is(err, exec.ErrNotFound) { @@ -54,7 +69,7 @@ func DetectExecutable(ctx context.Context) (string, error) { func DetectVEnvExecutable(venvPath string) (string, error) { interpreterPath := filepath.Join(venvPath, "bin", "python3") if runtime.GOOS == "windows" { - interpreterPath = filepath.Join(venvPath, "Scripts", "python3.exe") + interpreterPath = filepath.Join(venvPath, "Scripts", "python.exe") } if _, err := os.Stat(interpreterPath); err != nil { diff --git a/libs/python/detect_test.go b/libs/python/detect_test.go index 78c7067f7..0aeedb776 100644 --- a/libs/python/detect_test.go +++ b/libs/python/detect_test.go @@ -14,13 +14,13 @@ func TestDetectVEnvExecutable(t *testing.T) { dir := t.TempDir() interpreterPath := interpreterPath(dir) - err := os.Mkdir(filepath.Dir(interpreterPath), 0755) + err := os.Mkdir(filepath.Dir(interpreterPath), 0o755) require.NoError(t, err) - err = os.WriteFile(interpreterPath, []byte(""), 0755) + err = os.WriteFile(interpreterPath, []byte(""), 0o755) require.NoError(t, err) - err = os.WriteFile(filepath.Join(dir, "pyvenv.cfg"), []byte(""), 0755) + err = os.WriteFile(filepath.Join(dir, "pyvenv.cfg"), []byte(""), 0o755) require.NoError(t, err) executable, err := DetectVEnvExecutable(dir) @@ -39,7 +39,7 @@ func TestDetectVEnvExecutable_badLayout(t *testing.T) { func interpreterPath(venvPath string) string { if runtime.GOOS == "windows" { - return filepath.Join(venvPath, "Scripts", "python3.exe") + return filepath.Join(venvPath, "Scripts", "python.exe") } else { return filepath.Join(venvPath, "bin", "python3") } diff --git a/libs/python/interpreters.go b/libs/python/interpreters.go index 94f5074de..6071309a8 100644 --- a/libs/python/interpreters.go +++ b/libs/python/interpreters.go @@ -18,8 +18,10 @@ import ( var ErrNoPythonInterpreters = errors.New("no python3 interpreters found") -const officialMswinPython = "(Python Official) https://python.org/downloads/windows" -const microsoftStorePython = "(Microsoft Store) https://apps.microsoft.com/store/search?publisher=Python%20Software%20Foundation" +const ( + officialMswinPython = "(Python Official) https://python.org/downloads/windows" + microsoftStorePython = "(Microsoft Store) https://apps.microsoft.com/store/search?publisher=Python%20Software%20Foundation" +) const worldWriteable = 0o002 diff --git a/libs/python/interpreters_unix_test.go b/libs/python/interpreters_unix_test.go index e2b0a5a1c..8471644a1 100644 --- a/libs/python/interpreters_unix_test.go +++ b/libs/python/interpreters_unix_test.go @@ -34,13 +34,14 @@ func TestFilteringInterpreters(t *testing.T) { rogueBin := filepath.Join(t.TempDir(), "rogue-bin") err := os.Mkdir(rogueBin, 0o777) assert.NoError(t, err) - os.Chmod(rogueBin, 0o777) + err = os.Chmod(rogueBin, 0o777) + assert.NoError(t, err) raw, err := os.ReadFile("testdata/world-writeable/python8.4") assert.NoError(t, err) injectedBinary := filepath.Join(rogueBin, "python8.4") - err = os.WriteFile(injectedBinary, raw, 00777) + err = os.WriteFile(injectedBinary, raw, 0o0777) assert.NoError(t, err) t.Setenv("PATH", "testdata/other-binaries-filtered:"+rogueBin) diff --git a/libs/python/pythontest/pythontest.go b/libs/python/pythontest/pythontest.go new file mode 100644 index 000000000..9a2dec0ee --- 
/dev/null +++ b/libs/python/pythontest/pythontest.go @@ -0,0 +1,107 @@ +package pythontest + +import ( + "context" + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/databricks/cli/internal/testutil" + "github.com/stretchr/testify/require" +) + +type VenvOpts struct { + // input + PythonVersion string + skipVersionCheck bool + + // input/output + Dir string + Name string + + // output: + // Absolute path to venv + EnvPath string + + // Absolute path to venv/bin or venv/Scripts, depending on OS + BinPath string + + // Absolute path to python binary + PythonExe string +} + +func CreatePythonEnv(opts *VenvOpts) error { + if opts == nil || opts.PythonVersion == "" { + return errors.New("PythonVersion must be provided") + } + if opts.Name == "" { + opts.Name = testutil.RandomName("test-venv-") + } + + cmd := exec.Command("uv", "venv", opts.Name, "--python", opts.PythonVersion, "--seed", "-q") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Dir = opts.Dir + err := cmd.Run() + if err != nil { + return err + } + + opts.EnvPath, err = filepath.Abs(filepath.Join(opts.Dir, opts.Name)) + if err != nil { + return err + } + + _, err = os.Stat(opts.EnvPath) + if err != nil { + return fmt.Errorf("cannot stat EnvPath %s: %s", opts.EnvPath, err) + } + + if runtime.GOOS == "windows" { + // https://github.com/pypa/virtualenv/commit/993ba1316a83b760370f5a3872b3f5ef4dd904c1 + opts.BinPath = filepath.Join(opts.EnvPath, "Scripts") + opts.PythonExe = filepath.Join(opts.BinPath, "python.exe") + } else { + opts.BinPath = filepath.Join(opts.EnvPath, "bin") + opts.PythonExe = filepath.Join(opts.BinPath, "python3") + } + + _, err = os.Stat(opts.BinPath) + if err != nil { + return fmt.Errorf("cannot stat BinPath %s: %s", opts.BinPath, err) + } + + _, err = os.Stat(opts.PythonExe) + if err != nil { + return fmt.Errorf("cannot stat PythonExe %s: %s", opts.PythonExe, err) + } + + if !opts.skipVersionCheck { + cmd := exec.Command(opts.PythonExe, "--version") + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("Failed to run %s --version: %s", opts.PythonExe, err) + } + outString := string(out) + expectVersion := "Python " + opts.PythonVersion + if !strings.HasPrefix(outString, expectVersion) { + return fmt.Errorf("Unexpected output from %s --version: %v (expected %v)", opts.PythonExe, outString, expectVersion) + } + } + + return nil +} + +func RequireActivatedPythonEnv(t *testing.T, ctx context.Context, opts *VenvOpts) { + err := CreatePythonEnv(opts) + require.NoError(t, err) + require.DirExists(t, opts.BinPath) + + newPath := fmt.Sprintf("%s%c%s", opts.BinPath, os.PathListSeparator, os.Getenv("PATH")) + t.Setenv("PATH", newPath) +} diff --git a/libs/python/pythontest/pythontest_test.go b/libs/python/pythontest/pythontest_test.go new file mode 100644 index 000000000..3161092d3 --- /dev/null +++ b/libs/python/pythontest/pythontest_test.go @@ -0,0 +1,43 @@ +package pythontest + +import ( + "context" + "os/exec" + "path/filepath" + "testing" + + "github.com/databricks/cli/libs/python" + "github.com/stretchr/testify/require" +) + +func TestVenvSuccess(t *testing.T) { + // Test at least two version to ensure we capture a case where venv version does not match system one + for _, pythonVersion := range []string{"3.11", "3.12"} { + t.Run(pythonVersion, func(t *testing.T) { + ctx := context.Background() + dir := t.TempDir() + opts := VenvOpts{ + PythonVersion: pythonVersion, + Dir: dir, + } + RequireActivatedPythonEnv(t, ctx, &opts) + 
require.DirExists(t, opts.EnvPath) + require.DirExists(t, opts.BinPath) + require.FileExists(t, opts.PythonExe) + + pythonExe, err := exec.LookPath(python.GetExecutable()) + require.NoError(t, err) + require.Equal(t, filepath.Dir(pythonExe), filepath.Dir(opts.PythonExe)) + require.FileExists(t, pythonExe) + }) + } +} + +func TestWrongVersion(t *testing.T) { + require.Error(t, CreatePythonEnv(&VenvOpts{PythonVersion: "4.0"})) +} + +func TestMissingVersion(t *testing.T) { + require.Error(t, CreatePythonEnv(nil)) + require.Error(t, CreatePythonEnv(&VenvOpts{})) +} diff --git a/libs/sync/diff.go b/libs/sync/diff.go index e91f7277e..d81a3ae65 100644 --- a/libs/sync/diff.go +++ b/libs/sync/diff.go @@ -20,7 +20,7 @@ func (d diff) IsEmpty() bool { // Compute operations required to make files in WSFS reflect current local files. // Takes into account changes since the last sync iteration. -func computeDiff(after *SnapshotState, before *SnapshotState) diff { +func computeDiff(after, before *SnapshotState) diff { d := &diff{ delete: make([]string, 0), rmdir: make([]string, 0), @@ -35,7 +35,7 @@ func computeDiff(after *SnapshotState, before *SnapshotState) diff { } // Add operators for tracked files that no longer exist. -func (d *diff) addRemovedFiles(after *SnapshotState, before *SnapshotState) { +func (d *diff) addRemovedFiles(after, before *SnapshotState) { for localName, remoteName := range before.LocalToRemoteNames { if _, ok := after.LocalToRemoteNames[localName]; !ok { d.delete = append(d.delete, remoteName) @@ -50,7 +50,7 @@ func (d *diff) addRemovedFiles(after *SnapshotState, before *SnapshotState) { // Cleanup previous remote files for files that had their remote targets change. For // example this is possible if you convert a normal python script to a notebook. -func (d *diff) addFilesWithRemoteNameChanged(after *SnapshotState, before *SnapshotState) { +func (d *diff) addFilesWithRemoteNameChanged(after, before *SnapshotState) { for localName, beforeRemoteName := range before.LocalToRemoteNames { afterRemoteName, ok := after.LocalToRemoteNames[localName] if ok && afterRemoteName != beforeRemoteName { @@ -60,7 +60,7 @@ func (d *diff) addFilesWithRemoteNameChanged(after *SnapshotState, before *Snaps } // Add operators for files that were not being tracked before. -func (d *diff) addNewFiles(after *SnapshotState, before *SnapshotState) { +func (d *diff) addNewFiles(after, before *SnapshotState) { for localName := range after.LastModifiedTimes { if _, ok := before.LastModifiedTimes[localName]; !ok { d.put = append(d.put, localName) @@ -74,7 +74,7 @@ func (d *diff) addNewFiles(after *SnapshotState, before *SnapshotState) { } // Add operators for files which had their contents updated. 
-func (d *diff) addUpdatedFiles(after *SnapshotState, before *SnapshotState) { +func (d *diff) addUpdatedFiles(after, before *SnapshotState) { for localName, modTime := range after.LastModifiedTimes { prevModTime, ok := before.LastModifiedTimes[localName] if ok && modTime.After(prevModTime) { diff --git a/libs/sync/event.go b/libs/sync/event.go index 8e5c0efa2..05821a477 100644 --- a/libs/sync/event.go +++ b/libs/sync/event.go @@ -73,7 +73,7 @@ func (e *EventStart) String() string { return fmt.Sprintf("Action: %s", e.EventChanges.String()) } -func newEventStart(seq int, put []string, delete []string) Event { +func newEventStart(seq int, put, delete []string) Event { return &EventStart{ EventBase: newEventBase(seq, EventTypeStart), EventChanges: &EventChanges{Put: put, Delete: delete}, @@ -133,7 +133,7 @@ func (e *EventSyncComplete) String() string { return "Complete" } -func newEventComplete(seq int, put []string, delete []string) Event { +func newEventComplete(seq int, put, delete []string) Event { return &EventSyncComplete{ EventBase: newEventBase(seq, EventTypeComplete), EventChanges: &EventChanges{Put: put, Delete: delete}, diff --git a/libs/sync/output.go b/libs/sync/output.go index c01b25ef6..e6ac8c56c 100644 --- a/libs/sync/output.go +++ b/libs/sync/output.go @@ -43,9 +43,9 @@ func TextOutput(ctx context.Context, ch <-chan Event, w io.Writer) { // Log only if something actually happened. // Sync events produce an empty string if nothing happened. if str := e.String(); str != "" { - bw.WriteString(str) - bw.WriteString("\n") - bw.Flush() + _, _ = bw.WriteString(str) + _, _ = bw.WriteString("\n") + _ = bw.Flush() } } } diff --git a/libs/sync/snapshot.go b/libs/sync/snapshot.go index f2920d8c2..a596531b9 100644 --- a/libs/sync/snapshot.go +++ b/libs/sync/snapshot.go @@ -2,6 +2,8 @@ package sync import ( "context" + "crypto/md5" + "encoding/hex" "encoding/json" "errors" "fmt" @@ -10,9 +12,6 @@ import ( "path/filepath" "time" - "crypto/md5" - "encoding/hex" - "github.com/databricks/cli/libs/fileset" "github.com/databricks/cli/libs/log" ) @@ -91,7 +90,7 @@ func GetFileName(host, remotePath string) string { func SnapshotPath(opts *SyncOptions) (string, error) { snapshotDir := filepath.Join(opts.SnapshotBasePath, syncSnapshotDirName) if _, err := os.Stat(snapshotDir); errors.Is(err, fs.ErrNotExist) { - err = os.MkdirAll(snapshotDir, 0755) + err = os.MkdirAll(snapshotDir, 0o755) if err != nil { return "", fmt.Errorf("failed to create config directory: %s", err) } @@ -122,7 +121,7 @@ func newSnapshot(ctx context.Context, opts *SyncOptions) (*Snapshot, error) { } func (s *Snapshot) Save(ctx context.Context) error { - f, err := os.OpenFile(s.snapshotPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + f, err := os.OpenFile(s.snapshotPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644) if err != nil { return fmt.Errorf("failed to create/open persisted sync snapshot file: %s", err) } diff --git a/libs/sync/snapshot_state.go b/libs/sync/snapshot_state.go index 09bb5b63e..d8660ee6a 100644 --- a/libs/sync/snapshot_state.go +++ b/libs/sync/snapshot_state.go @@ -51,7 +51,6 @@ func NewSnapshotState(localFiles []fileset.File) (*SnapshotState, error) { // Compute the remote name the file will have in WSFS remoteName := f.Relative isNotebook, err := f.IsNotebook() - if err != nil { // Ignore this file if we're unable to determine the notebook type. // Trying to upload such a file to the workspace would fail anyway. 
diff --git a/libs/sync/sync.go b/libs/sync/sync.go index 6bd26f224..dc2c8992a 100644 --- a/libs/sync/sync.go +++ b/libs/sync/sync.go @@ -117,7 +117,7 @@ func New(ctx context.Context, opts SyncOptions) (*Sync, error) { } var notifier EventNotifier - var outputWaitGroup = &stdsync.WaitGroup{} + outputWaitGroup := &stdsync.WaitGroup{} if opts.OutputHandler != nil { ch := make(chan Event, MaxRequestsInFlight) notifier = &ChannelNotifier{ch} diff --git a/libs/tags/gcp_test.go b/libs/tags/gcp_test.go index 89f4fd8e6..7c960acbb 100644 --- a/libs/tags/gcp_test.go +++ b/libs/tags/gcp_test.go @@ -38,7 +38,6 @@ func TestGcpNormalizeKey(t *testing.T) { assert.Equal(t, "test", gcpTag.NormalizeKey("test")) assert.Equal(t, "cafe", gcpTag.NormalizeKey("café 🍎?")) assert.Equal(t, "cafe_foo", gcpTag.NormalizeKey("__café_foo__")) - } func TestGcpNormalizeValue(t *testing.T) { diff --git a/libs/template/builtin_test.go b/libs/template/builtin_test.go index 504e0acca..79e04cb84 100644 --- a/libs/template/builtin_test.go +++ b/libs/template/builtin_test.go @@ -11,18 +11,24 @@ import ( func TestBuiltin(t *testing.T) { out, err := Builtin() require.NoError(t, err) - assert.Len(t, out, 3) + assert.GreaterOrEqual(t, len(out), 3) - // Confirm names. - assert.Equal(t, "dbt-sql", out[0].Name) - assert.Equal(t, "default-python", out[1].Name) - assert.Equal(t, "default-sql", out[2].Name) + // Create a map of templates by name for easier lookup + templates := make(map[string]*BuiltinTemplate) + for _, tmpl := range out { + templates[tmpl.Name] = &tmpl + } - // Confirm that the filesystems work. - _, err = fs.Stat(out[0].FS, `template/{{.project_name}}/dbt_project.yml.tmpl`) + // Verify all expected templates exist + assert.Contains(t, templates, "dbt-sql") + assert.Contains(t, templates, "default-python") + assert.Contains(t, templates, "default-sql") + + // Verify the filesystems work for each template + _, err = fs.Stat(templates["dbt-sql"].FS, `template/{{.project_name}}/dbt_project.yml.tmpl`) assert.NoError(t, err) - _, err = fs.Stat(out[1].FS, `template/{{.project_name}}/tests/main_test.py.tmpl`) + _, err = fs.Stat(templates["default-python"].FS, `template/{{.project_name}}/tests/main_test.py.tmpl`) assert.NoError(t, err) - _, err = fs.Stat(out[2].FS, `template/{{.project_name}}/src/orders_daily.sql.tmpl`) + _, err = fs.Stat(templates["default-sql"].FS, `template/{{.project_name}}/src/orders_daily.sql.tmpl`) assert.NoError(t, err) } diff --git a/libs/template/file_test.go b/libs/template/file_test.go index bd5f6d632..ced38c284 100644 --- a/libs/template/file_test.go +++ b/libs/template/file_test.go @@ -57,7 +57,7 @@ func TestTemplateInMemoryFilePersistToDisk(t *testing.T) { t.SkipNow() } ctx := context.Background() - testInMemoryFile(t, ctx, 0755) + testInMemoryFile(t, ctx, 0o755) } func TestTemplateInMemoryFilePersistToDiskForWindows(t *testing.T) { @@ -67,7 +67,7 @@ func TestTemplateInMemoryFilePersistToDiskForWindows(t *testing.T) { // we have separate tests for windows because of differences in valid // fs.FileMode values we can use for different operating systems. 
ctx := context.Background() - testInMemoryFile(t, ctx, 0666) + testInMemoryFile(t, ctx, 0o666) } func TestTemplateCopyFilePersistToDisk(t *testing.T) { @@ -75,7 +75,7 @@ func TestTemplateCopyFilePersistToDisk(t *testing.T) { t.SkipNow() } ctx := context.Background() - testCopyFile(t, ctx, 0644) + testCopyFile(t, ctx, 0o644) } func TestTemplateCopyFilePersistToDiskForWindows(t *testing.T) { @@ -85,5 +85,5 @@ func TestTemplateCopyFilePersistToDiskForWindows(t *testing.T) { // we have separate tests for windows because of differences in valid // fs.FileMode values we can use for different operating systems. ctx := context.Background() - testCopyFile(t, ctx, 0666) + testCopyFile(t, ctx, 0o666) } diff --git a/libs/template/helpers.go b/libs/template/helpers.go index 7f7acbd24..4550e5fa2 100644 --- a/libs/template/helpers.go +++ b/libs/template/helpers.go @@ -31,9 +31,11 @@ type pair struct { v any } -var cachedUser *iam.User -var cachedIsServicePrincipal *bool -var cachedCatalog *string +var ( + cachedUser *iam.User + cachedIsServicePrincipal *bool + cachedCatalog *string +) // UUID that is stable for the duration of the template execution. This can be used // to populate the `bundle.uuid` field in databricks.yml by template authors. diff --git a/libs/template/helpers_test.go b/libs/template/helpers_test.go index 6c476c658..d98f40b24 100644 --- a/libs/template/helpers_test.go +++ b/libs/template/helpers_test.go @@ -158,12 +158,11 @@ func TestWorkspaceHost(t *testing.T) { assert.Len(t, r.files, 1) assert.Contains(t, string(r.files[0].(*inMemoryFile).content), "https://myhost.com") assert.Contains(t, string(r.files[0].(*inMemoryFile).content), "i3.xlarge") - } func TestWorkspaceHostNotConfigured(t *testing.T) { ctx := context.Background() - cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "template") + cmd := cmdio.NewIO(ctx, flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "template") ctx = cmdio.InContext(ctx, cmd) w := &databricks.WorkspaceClient{ @@ -178,5 +177,4 @@ func TestWorkspaceHostNotConfigured(t *testing.T) { err = r.walk() require.ErrorContains(t, err, "cannot determine target workspace") - } diff --git a/libs/template/materialize.go b/libs/template/materialize.go index ee30444a5..86a6a8c37 100644 --- a/libs/template/materialize.go +++ b/libs/template/materialize.go @@ -10,9 +10,11 @@ import ( "github.com/databricks/cli/libs/filer" ) -const libraryDirName = "library" -const templateDirName = "template" -const schemaFileName = "databricks_template_schema.json" +const ( + libraryDirName = "library" + templateDirName = "template" + schemaFileName = "databricks_template_schema.json" +) // This function materializes the input templates as a project, using user defined // configurations. 
diff --git a/libs/template/renderer.go b/libs/template/renderer.go index 0f30a67d0..5030cd9df 100644 --- a/libs/template/renderer.go +++ b/libs/template/renderer.go @@ -310,7 +310,7 @@ func (r *renderer) persistToDisk(ctx context.Context, out filer.Filer) error { if err == nil { return fmt.Errorf("failed to initialize template, one or more files already exist: %s", path) } - if err != nil && !errors.Is(err, fs.ErrNotExist) { + if !errors.Is(err, fs.ErrNotExist) { return fmt.Errorf("error while verifying file %s does not already exist: %w", path, err) } } diff --git a/libs/template/renderer_test.go b/libs/template/renderer_test.go index a4b9166da..eeb308732 100644 --- a/libs/template/renderer_test.go +++ b/libs/template/renderer_test.go @@ -27,7 +27,7 @@ import ( "github.com/stretchr/testify/require" ) -func assertFileContent(t *testing.T, path string, content string) { +func assertFileContent(t *testing.T, path, content string) { b, err := os.ReadFile(path) require.NoError(t, err) assert.Equal(t, content, string(b)) @@ -39,7 +39,7 @@ func assertFilePermissions(t *testing.T, path string, perm fs.FileMode) { assert.Equal(t, perm, info.Mode().Perm()) } -func assertBuiltinTemplateValid(t *testing.T, template string, settings map[string]any, target string, isServicePrincipal bool, build bool, tempDir string) { +func assertBuiltinTemplateValid(t *testing.T, template string, settings map[string]any, target string, isServicePrincipal, build bool, tempDir string) { ctx := context.Background() templateFS, err := fs.Sub(builtinTemplates, path.Join("templates", template)) @@ -200,8 +200,7 @@ func TestRendererWithAssociatedTemplateInLibrary(t *testing.T) { } func TestRendererExecuteTemplate(t *testing.T) { - templateText := - `"{{.count}} items are made of {{.Material}}". + templateText := `"{{.count}} items are made of {{.Material}}". {{if eq .Animal "sheep" }} Sheep wool is the best! 
{{else}} @@ -256,7 +255,6 @@ func TestRendererExecuteTemplateWithUnknownProperty(t *testing.T) { } func TestRendererIsSkipped(t *testing.T) { - skipPatterns := []string{"a*", "*yz", "def", "a/b/*"} // skipped paths @@ -319,22 +317,22 @@ func TestRendererPersistToDisk(t *testing.T) { skipPatterns: []string{"a/b/c", "mn*"}, files: []file{ &inMemoryFile{ - perm: 0444, + perm: 0o444, relPath: "a/b/c", content: nil, }, &inMemoryFile{ - perm: 0444, + perm: 0o444, relPath: "mno", content: nil, }, &inMemoryFile{ - perm: 0444, + perm: 0o444, relPath: "a/b/d", content: []byte("123"), }, &inMemoryFile{ - perm: 0444, + perm: 0o444, relPath: "mmnn", content: []byte("456"), }, @@ -350,9 +348,9 @@ func TestRendererPersistToDisk(t *testing.T) { assert.NoFileExists(t, filepath.Join(tmpDir, "mno")) assertFileContent(t, filepath.Join(tmpDir, "a", "b", "d"), "123") - assertFilePermissions(t, filepath.Join(tmpDir, "a", "b", "d"), 0444) + assertFilePermissions(t, filepath.Join(tmpDir, "a", "b", "d"), 0o444) assertFileContent(t, filepath.Join(tmpDir, "mmnn"), "456") - assertFilePermissions(t, filepath.Join(tmpDir, "mmnn"), 0444) + assertFilePermissions(t, filepath.Join(tmpDir, "mmnn"), 0o444) } func TestRendererWalk(t *testing.T) { @@ -520,8 +518,8 @@ func TestRendererReadsPermissionsBits(t *testing.T) { } assert.Len(t, r.files, 2) - assert.Equal(t, getPermissions(r, "script.sh"), fs.FileMode(0755)) - assert.Equal(t, getPermissions(r, "not-a-script"), fs.FileMode(0644)) + assert.Equal(t, getPermissions(r, "script.sh"), fs.FileMode(0o755)) + assert.Equal(t, getPermissions(r, "not-a-script"), fs.FileMode(0o644)) } func TestRendererErrorOnConflictingFile(t *testing.T) { @@ -537,7 +535,7 @@ func TestRendererErrorOnConflictingFile(t *testing.T) { skipPatterns: []string{}, files: []file{ &inMemoryFile{ - perm: 0444, + perm: 0o444, relPath: "a", content: []byte("123"), }, @@ -563,7 +561,7 @@ func TestRendererNoErrorOnConflictingFileIfSkipped(t *testing.T) { skipPatterns: []string{"a"}, files: []file{ &inMemoryFile{ - perm: 0444, + perm: 0o444, relPath: "a", content: []byte("123"), }, diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/settings.json.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/settings.json.tmpl index 562ba136f..3eca01226 100644 --- a/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/settings.json.tmpl +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/settings.json.tmpl @@ -1,6 +1,5 @@ { "python.analysis.stubPath": ".vscode", - "databricks.python.envFile": "${workspaceFolder}/.env", "jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])", "jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------", "python.testing.pytestArgs": [ diff --git a/libs/template/templates/default-python/template/{{.project_name}}/.vscode/settings.json b/libs/template/templates/default-python/template/{{.project_name}}/.vscode/settings.json index f19498daa..8ee87c30d 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/.vscode/settings.json +++ b/libs/template/templates/default-python/template/{{.project_name}}/.vscode/settings.json @@ -1,6 +1,5 @@ { "python.analysis.stubPath": ".vscode", - "databricks.python.envFile": "${workspaceFolder}/.env", "jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\|#\\s*In\\[\\d*?\\]|#\\s*In\\[ 
\\])", "jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------", "python.testing.pytestArgs": [ diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/settings.json.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/settings.json.tmpl index c63af24b4..03a365f9d 100644 --- a/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/settings.json.tmpl +++ b/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/settings.json.tmpl @@ -1,6 +1,5 @@ { "python.analysis.stubPath": ".vscode", - "databricks.python.envFile": "${workspaceFolder}/.env", "jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])", "jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------", "python.testing.pytestArgs": [ diff --git a/libs/testdiff/testdiff.go b/libs/testdiff/testdiff.go new file mode 100644 index 000000000..1e1df727a --- /dev/null +++ b/libs/testdiff/testdiff.go @@ -0,0 +1,90 @@ +package testdiff + +import ( + "fmt" + "strings" + + "github.com/databricks/cli/internal/testutil" + "github.com/hexops/gotextdiff" + "github.com/hexops/gotextdiff/myers" + "github.com/hexops/gotextdiff/span" + "github.com/stretchr/testify/assert" + "github.com/wI2L/jsondiff" +) + +func UnifiedDiff(filename1, filename2, s1, s2 string) string { + edits := myers.ComputeEdits(span.URIFromPath(filename1), s1, s2) + return fmt.Sprint(gotextdiff.ToUnified(filename1, filename2, s1, edits)) +} + +func AssertEqualTexts(t testutil.TestingT, filename1, filename2, expected, out string) { + if len(out) < 1000 && len(expected) < 1000 { + // This shows full strings + diff which could be useful when debugging newlines + assert.Equal(t, expected, out) + } else { + // only show diff for large texts + diff := UnifiedDiff(filename1, filename2, expected, out) + t.Errorf("Diff:\n" + diff) + } +} + +func AssertEqualJQ(t testutil.TestingT, expectedName, outName, expected, out string, ignorePaths []string) { + patch, err := jsondiff.CompareJSON([]byte(expected), []byte(out)) + if err != nil { + t.Logf("CompareJSON error for %s vs %s: %s (fallback to textual comparison)", outName, expectedName, err) + AssertEqualTexts(t, expectedName, outName, expected, out) + } else { + diff := UnifiedDiff(expectedName, outName, expected, out) + t.Logf("Diff:\n%s", diff) + allowedDiffs := []string{} + erroredDiffs := []string{} + for _, op := range patch { + if allowDifference(ignorePaths, op) { + allowedDiffs = append(allowedDiffs, fmt.Sprintf("%7s %s %v old=%v", op.Type, op.Path, op.Value, op.OldValue)) + } else { + erroredDiffs = append(erroredDiffs, fmt.Sprintf("%7s %s %v old=%v", op.Type, op.Path, op.Value, op.OldValue)) + } + } + if len(allowedDiffs) > 0 { + t.Logf("Allowed differences between %s and %s:\n ==> %s", expectedName, outName, strings.Join(allowedDiffs, "\n ==> ")) + } + if len(erroredDiffs) > 0 { + t.Errorf("Unexpected differences between %s and %s:\n ==> %s", expectedName, outName, strings.Join(erroredDiffs, "\n ==> ")) + } + } +} + +func allowDifference(ignorePaths []string, op jsondiff.Operation) bool { + if matchesPrefixes(ignorePaths, op.Path) { + return true + } + if op.Type == "replace" && almostSameStrings(op.OldValue, op.Value) { + return true + } + return false +} + +// compare strings and ignore forward vs backward slashes +func almostSameStrings(v1, v2 any) bool { + s1, ok := v1.(string) + if !ok { + return false + } + s2, ok := 
v2.(string) + if !ok { + return false + } + return strings.ReplaceAll(s1, "\\", "/") == strings.ReplaceAll(s2, "\\", "/") +} + +func matchesPrefixes(prefixes []string, path string) bool { + for _, p := range prefixes { + if p == path { + return true + } + if strings.HasPrefix(path, p+"/") { + return true + } + } + return false +} diff --git a/libs/testdiff/testdiff_test.go b/libs/testdiff/testdiff_test.go new file mode 100644 index 000000000..869fee78a --- /dev/null +++ b/libs/testdiff/testdiff_test.go @@ -0,0 +1,20 @@ +package testdiff + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDiff(t *testing.T) { + assert.Equal(t, "", UnifiedDiff("a", "b", "", "")) + assert.Equal(t, "", UnifiedDiff("a", "b", "abc", "abc")) + assert.Equal(t, "--- a\n+++ b\n@@ -1 +1,2 @@\n abc\n+123\n", UnifiedDiff("a", "b", "abc\n", "abc\n123\n")) +} + +func TestMatchesPrefixes(t *testing.T) { + assert.False(t, matchesPrefixes([]string{}, "")) + assert.False(t, matchesPrefixes([]string{"/hello", "/hello/world"}, "")) + assert.True(t, matchesPrefixes([]string{"/hello", "/a/b"}, "/hello")) + assert.True(t, matchesPrefixes([]string{"/hello", "/a/b"}, "/a/b/c")) +} diff --git a/libs/textutil/textutil_test.go b/libs/textutil/textutil_test.go index f6834a1ef..b9268c98b 100644 --- a/libs/textutil/textutil_test.go +++ b/libs/textutil/textutil_test.go @@ -50,7 +50,8 @@ func TestNormalizeString(t *testing.T) { { input: ".test//test..test", expected: "test_test_test", - }} + }, + } for _, c := range cases { assert.Equal(t, c.expected, NormalizeString(c.input)) diff --git a/libs/vfs/leaf.go b/libs/vfs/leaf.go deleted file mode 100644 index 8c11f9039..000000000 --- a/libs/vfs/leaf.go +++ /dev/null @@ -1,29 +0,0 @@ -package vfs - -import ( - "errors" - "io/fs" -) - -// FindLeafInTree returns the first path that holds `name`, -// traversing up to the root of the filesystem, starting at `p`. -func FindLeafInTree(p Path, name string) (Path, error) { - for p != nil { - _, err := fs.Stat(p, name) - - // No error means we found the leaf in p. - if err == nil { - return p, nil - } - - // ErrNotExist means we continue traversal up the tree. - if errors.Is(err, fs.ErrNotExist) { - p = p.Parent() - continue - } - - return nil, err - } - - return nil, fs.ErrNotExist -} diff --git a/libs/vfs/leaf_test.go b/libs/vfs/leaf_test.go deleted file mode 100644 index da9412ec0..000000000 --- a/libs/vfs/leaf_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package vfs - -import ( - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestFindLeafInTree(t *testing.T) { - wd, err := os.Getwd() - require.NoError(t, err) - - root := filepath.Join(wd, "..", "..") - - // Find from working directory should work. - { - out, err := FindLeafInTree(MustNew(wd), ".git") - assert.NoError(t, err) - assert.Equal(t, root, out.Native()) - } - - // Find from project root itself should work. - { - out, err := FindLeafInTree(MustNew(root), ".git") - assert.NoError(t, err) - assert.Equal(t, root, out.Native()) - } - - // Find for something that doesn't exist should work. - { - out, err := FindLeafInTree(MustNew(root), "this-leaf-doesnt-exist-anywhere") - assert.ErrorIs(t, err, os.ErrNotExist) - assert.Equal(t, nil, out) - } -}