diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 303c7855..5f4b5086 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -0c86ea6dbd9a730c24ff0d4e509603e476955ac5 \ No newline at end of file +d25296d2f4aa7bd6195c816fdf82e0f960f775da \ No newline at end of file diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index b489a0b0..ef7977e1 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -115,6 +115,9 @@ func new{{.PascalName}}() *cobra.Command { {{- if .Request}} var {{.CamelName}}Req {{.Service.Package.Name}}.{{.Request.PascalName}} + {{- if .RequestBodyField }} + {{.CamelName}}Req.{{.RequestBodyField.PascalName}} = &{{.Service.Package.Name}}.{{.RequestBodyField.Entity.PascalName}}{} + {{- end }} {{- if .CanUseJson}} var {{.CamelName}}Json flags.JsonFlag {{- end}} @@ -127,21 +130,27 @@ func new{{.PascalName}}() *cobra.Command { cmd.Flags().BoolVar(&{{.CamelName}}SkipWait, "no-wait", {{.CamelName}}SkipWait, `do not wait to reach {{range $i, $e := .Wait.Success}}{{if $i}} or {{end}}{{.Content}}{{end}} state`) cmd.Flags().DurationVar(&{{.CamelName}}Timeout, "timeout", {{.Wait.Timeout}}*time.Minute, `maximum amount of time to reach {{range $i, $e := .Wait.Success}}{{if $i}} or {{end}}{{.Content}}{{end}} state`) {{end -}} - {{if .Request}}// TODO: short flags + {{- $request := .Request -}} + {{- if .RequestBodyField -}} + {{- $request = .RequestBodyField.Entity -}} + {{- end -}} + {{if $request }}// TODO: short flags {{- if .CanUseJson}} cmd.Flags().Var(&{{.CamelName}}Json, "json", `either inline JSON string or @path/to/file.json with request body`) {{- end}} {{$method := .}} {{ if not .IsJsonOnly }} - {{range .Request.Fields -}} + {{range $request.Fields -}} {{- if not .Required -}} {{if .Entity.IsObject }}// TODO: complex arg: {{.Name}} {{else if .Entity.IsAny }}// TODO: any: {{.Name}} {{else if .Entity.ArrayValue }}// TODO: array: {{.Name}} {{else if .Entity.MapValue }}// TODO: map via StringToStringVar: {{.Name}} {{else if .Entity.IsEmpty }}// TODO: output-only field - {{else if .Entity.Enum }}cmd.Flags().Var(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", `{{.Summary | without "`" | trimSuffix "."}}. Supported values: {{template "printArray" .Entity.Enum}}`) - {{else}}cmd.Flags().{{template "arg-type" .Entity}}(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", {{$method.CamelName}}Req.{{.PascalName}}, `{{.Summary | without "`"}}`) + {{else if .Entity.IsComputed -}} + {{else if .IsOutputOnly -}} + {{else if .Entity.Enum }}cmd.Flags().Var(&{{- template "request-body-obj" (dict "Method" $method "Field" .)}}, "{{.KebabName}}", `{{.Summary | without "`" | trimSuffix "."}}. 
Supported values: {{template "printArray" .Entity.Enum}}`) + {{else}}cmd.Flags().{{template "arg-type" .Entity}}(&{{- template "request-body-obj" (dict "Method" $method "Field" .)}}, "{{.KebabName}}", {{- template "request-body-obj" (dict "Method" $method "Field" .)}}, `{{.Summary | without "`"}}`) {{end}} {{- end -}} {{- end}} @@ -161,14 +170,14 @@ func new{{.PascalName}}() *cobra.Command { {{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }} {{- $hasPosArgs := .HasRequiredPositionalArguments -}} - {{- $hasSinglePosArg := and $hasPosArgs (eq 1 (len .Request.RequiredFields)) -}} + {{- $hasSinglePosArg := and $hasPosArgs (eq 1 (len $request.RequiredFields)) -}} {{- $serviceHasNamedIdMap := and (and .Service.List .Service.List.NamedIdMap) (not (eq .PascalName "List")) -}} {{- $hasIdPrompt := and (not $noPrompt) (and $hasSinglePosArg $serviceHasNamedIdMap) -}} {{- $wait := and .Wait (and (not .IsCrudRead) (not (eq .SnakeName "get_run"))) -}} {{- $hasRequiredArgs := and (not $hasIdPrompt) $hasPosArgs -}} - {{- $hasSingleRequiredRequestBodyFieldWithPrompt := and (and $hasIdPrompt .Request) (eq 1 (len .Request.RequiredRequestBodyFields)) -}} - {{- $onlyPathArgsRequiredAsPositionalArguments := and .Request (eq (len .RequiredPositionalArguments) (len .Request.RequiredPathFields)) -}} - {{- $hasDifferentArgsWithJsonFlag := and (not $onlyPathArgsRequiredAsPositionalArguments) (and .CanUseJson .Request.HasRequiredRequestBodyFields) -}} + {{- $hasSingleRequiredRequestBodyFieldWithPrompt := and (and $hasIdPrompt $request) (eq 1 (len $request.RequiredRequestBodyFields)) -}} + {{- $onlyPathArgsRequiredAsPositionalArguments := and $request (eq (len .RequiredPositionalArguments) (len $request.RequiredPathFields)) -}} + {{- $hasDifferentArgsWithJsonFlag := and (not $onlyPathArgsRequiredAsPositionalArguments) (and .CanUseJson (or $request.HasRequiredRequestBodyFields )) -}} {{- $hasCustomArgHandler := or $hasRequiredArgs $hasDifferentArgsWithJsonFlag -}} {{- $atleastOneArgumentWithDescription := false -}} @@ -206,12 +215,12 @@ func new{{.PascalName}}() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { {{- if $hasDifferentArgsWithJsonFlag }} if cmd.Flags().Changed("json") { - err := root.ExactArgs({{len .Request.RequiredPathFields}})(cmd, args) + err := root.ExactArgs({{len $request.RequiredPathFields}})(cmd, args) if err != nil { - {{- if eq 0 (len .Request.RequiredPathFields) }} - return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide{{- range $index, $field := .Request.RequiredFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input") + {{- if eq 0 (len $request.RequiredPathFields) }} + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide{{- range $index, $field := $request.RequiredFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input") {{- else }} - return fmt.Errorf("when --json flag is specified, provide only{{- range $index, $field := .Request.RequiredPathFields}}{{if $index}},{{end}} {{$field.ConstantName}}{{end}} as positional arguments. Provide{{- range $index, $field := .Request.RequiredRequestBodyFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input") + return fmt.Errorf("when --json flag is specified, provide only{{- range $index, $field := $request.RequiredPathFields}}{{if $index}},{{end}} {{$field.ConstantName}}{{end}} as positional arguments. 
Provide{{- range $index, $field := $request.RequiredRequestBodyFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input") {{- end }} } return nil @@ -232,7 +241,7 @@ func new{{.PascalName}}() *cobra.Command { {{- if .Request }} {{ if .CanUseJson }} if cmd.Flags().Changed("json") { - diags := {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req) + diags := {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req{{ if .RequestBodyField }}.{{.RequestBodyField.PascalName}}{{ end }}) if diags.HasError() { return diags.Error() } @@ -251,20 +260,20 @@ func new{{.PascalName}}() *cobra.Command { {{- if $hasIdPrompt}} if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No{{range .Request.RequiredFields}} {{.ConstantName}}{{end}} argument specified. Loading names for {{.Service.TitleName}} drop-down." + promptSpinner <- "No{{range $request.RequiredFields}} {{.ConstantName}}{{end}} argument specified. Loading names for {{.Service.TitleName}} drop-down." names, err := {{if .Service.IsAccounts}}a{{else}}w{{end}}.{{(.Service.TrimPrefix "account").PascalName}}.{{.Service.List.NamedIdMap.PascalName}}(ctx{{if .Service.List.Request}}, {{.Service.Package.Name}}.{{.Service.List.Request.PascalName}}{}{{end}}) close(promptSpinner) if err != nil { return fmt.Errorf("failed to load names for {{.Service.TitleName}} drop-down. Please manually specify required arguments. Original error: %w", err) } - id, err := cmdio.Select(ctx, names, "{{range .Request.RequiredFields}}{{.Summary | trimSuffix "."}}{{end}}") + id, err := cmdio.Select(ctx, names, "{{range $request.RequiredFields}}{{.Summary | trimSuffix "."}}{{end}}") if err != nil { return err } args = append(args, id) } if len(args) != 1 { - return fmt.Errorf("expected to have {{range .Request.RequiredFields}}{{.Summary | trimSuffix "." | lower}}{{end}}") + return fmt.Errorf("expected to have {{range $request.RequiredFields}}{{.Summary | trimSuffix "." 
| lower}}{{end}}") } {{- end -}} @@ -388,13 +397,19 @@ func new{{.PascalName}}() *cobra.Command { if !cmd.Flags().Changed("json") { {{- end }} {{if not $field.Entity.IsString -}} - _, err = fmt.Sscan(args[{{$arg}}], &{{$method.CamelName}}Req.{{$field.PascalName}}) + _, err = fmt.Sscan(args[{{$arg}}], &{{- template "request-body-obj" (dict "Method" $method "Field" $field)}}) if err != nil { return fmt.Errorf("invalid {{$field.ConstantName}}: %s", args[{{$arg}}]) }{{else -}} - {{$method.CamelName}}Req.{{$field.PascalName}} = args[{{$arg}}] + {{- template "request-body-obj" (dict "Method" $method "Field" $field)}} = args[{{$arg}}] {{- end -}} {{- if $optionalIfJsonIsUsed }} } {{- end }} {{- end -}} + +{{- define "request-body-obj" -}} + {{- $method := .Method -}} + {{- $field := .Field -}} + {{$method.CamelName}}Req{{ if (and $method.RequestBodyField (not $field.IsPath)) }}.{{$method.RequestBodyField.PascalName}}{{end}}.{{$field.PascalName}} +{{- end -}} diff --git a/.gitattributes b/.gitattributes index 2470eb33..ecb5669e 100755 --- a/.gitattributes +++ b/.gitattributes @@ -30,13 +30,14 @@ cmd/account/users/users.go linguist-generated=true cmd/account/vpc-endpoints/vpc-endpoints.go linguist-generated=true cmd/account/workspace-assignment/workspace-assignment.go linguist-generated=true cmd/account/workspaces/workspaces.go linguist-generated=true +cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go linguist-generated=true +cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go linguist-generated=true cmd/workspace/alerts-legacy/alerts-legacy.go linguist-generated=true cmd/workspace/alerts/alerts.go linguist-generated=true cmd/workspace/apps/apps.go linguist-generated=true cmd/workspace/artifact-allowlists/artifact-allowlists.go linguist-generated=true cmd/workspace/automatic-cluster-update/automatic-cluster-update.go linguist-generated=true cmd/workspace/catalogs/catalogs.go linguist-generated=true -cmd/workspace/clean-rooms/clean-rooms.go linguist-generated=true cmd/workspace/cluster-policies/cluster-policies.go linguist-generated=true cmd/workspace/clusters/clusters.go linguist-generated=true cmd/workspace/cmd.go linguist-generated=true @@ -48,12 +49,14 @@ cmd/workspace/consumer-listings/consumer-listings.go linguist-generated=true cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go linguist-generated=true cmd/workspace/consumer-providers/consumer-providers.go linguist-generated=true cmd/workspace/credentials-manager/credentials-manager.go linguist-generated=true +cmd/workspace/credentials/credentials.go linguist-generated=true cmd/workspace/current-user/current-user.go linguist-generated=true cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true cmd/workspace/dashboards/dashboards.go linguist-generated=true cmd/workspace/data-sources/data-sources.go linguist-generated=true cmd/workspace/default-namespace/default-namespace.go linguist-generated=true cmd/workspace/disable-legacy-access/disable-legacy-access.go linguist-generated=true +cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs.go linguist-generated=true cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go linguist-generated=true cmd/workspace/experiments/experiments.go linguist-generated=true cmd/workspace/external-locations/external-locations.go linguist-generated=true diff --git a/.github/workflows/external-message.yml b/.github/workflows/external-message.yml new 
file mode 100644
index 00000000..1970735f
--- /dev/null
+++ b/.github/workflows/external-message.yml
@@ -0,0 +1,56 @@
+name: PR Comment
+
+# WARNING:
+# THIS WORKFLOW ALWAYS RUNS FOR EXTERNAL CONTRIBUTORS WITHOUT ANY APPROVAL.
+# THIS WORKFLOW RUNS FROM MAIN BRANCH, NOT FROM THE PR BRANCH.
+# DO NOT PULL THE PR OR EXECUTE ANY CODE FROM THE PR.
+
+on:
+  pull_request_target:
+    types: [opened, reopened, synchronize]
+    branches:
+      - main
+
+jobs:
+  comment-on-pr:
+    runs-on: ubuntu-latest
+    permissions:
+      pull-requests: write
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Delete old comments
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: |
+          # Delete previous comment if it exists
+          previous_comment_ids=$(gh api "repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/comments" \
+            --jq '.[] | select(.body | startswith("<!-- INTEGRATION_TESTS -->")) | .id')
+          echo "Previous comment IDs: $previous_comment_ids"
+          # Iterate over each comment ID and delete the comment
+          if [ ! -z "$previous_comment_ids" ]; then
+            echo "$previous_comment_ids" | while read -r comment_id; do
+              echo "Deleting comment with ID: $comment_id"
+              gh api "repos/${{ github.repository }}/issues/comments/$comment_id" -X DELETE
+            done
+          fi
+
+      - name: Comment on PR
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          COMMIT_SHA: ${{ github.event.pull_request.head.sha }}
+        run: |
+          gh pr comment ${{ github.event.pull_request.number }} --body \
+          "<!-- INTEGRATION_TESTS -->
+          If integration tests don't run automatically, an authorized user can run them manually by following the instructions below:
+
+          Trigger:
+          [go/deco-tests-run/cli](https://go/deco-tests-run/cli)
+
+          Inputs:
+          * PR number: ${{github.event.pull_request.number}}
+          * Commit SHA: \`${{ env.COMMIT_SHA }}\`
+
+          Checks will be approved automatically on success.
+          "
diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml
new file mode 100644
index 00000000..d56728c2
--- /dev/null
+++ b/.github/workflows/integration-tests.yml
@@ -0,0 +1,78 @@
+name: integration
+
+on:
+
+  pull_request:
+    types: [opened, synchronize]
+
+  merge_group:
+
+
+jobs:
+  check-token:
+    runs-on: ubuntu-latest
+    environment: "test-trigger-is"
+    outputs:
+      has_token: ${{ steps.set-token-status.outputs.has_token }}
+    steps:
+      - name: Check if DECO_WORKFLOW_TRIGGER_APP_ID is set
+        id: set-token-status
+        run: |
+          if [ -z "${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}" ]; then
+            echo "DECO_WORKFLOW_TRIGGER_APP_ID is empty. User has no access to secrets."
+            echo "::set-output name=has_token::false"
+          else
+            echo "DECO_WORKFLOW_TRIGGER_APP_ID is set. User has access to secrets."
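+            # Note: ::set-output is GitHub's legacy mechanism for publishing
+            # step outputs; current runners prefer appending "has_token=..."
+            # to "$GITHUB_OUTPUT" instead.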
+ echo "::set-output name=has_token::true" + fi + + trigger-tests: + runs-on: ubuntu-latest + needs: check-token + if: github.event_name == 'pull_request' && needs.check-token.outputs.has_token == 'true' + environment: "test-trigger-is" + + steps: + - uses: actions/checkout@v4 + + - name: Generate GitHub App Token + id: generate-token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }} + private-key: ${{ secrets.DECO_WORKFLOW_TRIGGER_PRIVATE_KEY }} + owner: ${{ secrets.ORG_NAME }} + repositories: ${{secrets.REPO_NAME}} + + - name: Trigger Workflow in Another Repo + env: + GH_TOKEN: ${{ steps.generate-token.outputs.token }} + run: | + gh workflow run cli-isolated-pr.yml -R ${{ secrets.ORG_NAME }}/${{secrets.REPO_NAME}} \ + --ref main \ + -f pull_request_number=${{ github.event.pull_request.number }} \ + -f commit_sha=${{ github.event.pull_request.head.sha }} + + + + # Statuses and checks apply to specific commits (by hash). + # Enforcement of required checks is done both at the PR level and the merge queue level. + # In case of multiple commits in a single PR, the hash of the squashed commit + # will not match the one for the latest (approved) commit in the PR. + # We auto approve the check for the merge queue for two reasons: + # * Queue times out due to duration of tests. + # * Avoid running integration tests twice, since it was already run at the tip of the branch before squashing. + auto-approve: + if: github.event_name == 'merge_group' + runs-on: ubuntu-latest + steps: + - name: Mark Check + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + shell: bash + run: | + gh api -X POST -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + /repos/${{ github.repository }}/statuses/${{ github.sha }} \ + -f 'state=success' \ + -f 'context=Integration Tests Check' diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index ee60da9d..8aea9549 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -33,7 +33,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.22.7 + go-version: 1.23.2 - name: Setup Python uses: actions/setup-python@v5 @@ -68,7 +68,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.22.7 + go-version: 1.23.2 # No need to download cached dependencies when running gofmt. cache: false @@ -100,7 +100,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.22.7 + go-version: 1.23.2 # Github repo: https://github.com/ajv-validator/ajv-cli - name: Install ajv-cli diff --git a/.github/workflows/release-snapshot.yml b/.github/workflows/release-snapshot.yml index 6a601a5f..4392c248 100644 --- a/.github/workflows/release-snapshot.yml +++ b/.github/workflows/release-snapshot.yml @@ -6,6 +6,15 @@ on: - "main" - "demo-*" + # Confirm that snapshot builds work if this file is modified. + pull_request: + types: + - opened + - synchronize + - reopened + paths: + - ".github/workflows/release-snapshot.yml" + workflow_dispatch: jobs: @@ -21,7 +30,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.22.7 + go-version: 1.23.2 # The default cache key for this action considers only the `go.sum` file. 
# We include .goreleaser.yaml here to differentiate from the cache used by the push action diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f9742a19..e8f59f9b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -22,7 +22,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.22.7 + go-version: 1.23.2 # The default cache key for this action considers only the `go.sum` file. # We include .goreleaser.yaml here to differentiate from the cache used by the push action @@ -63,7 +63,7 @@ jobs: echo "VERSION=${VERSION:1}" >> $GITHUB_ENV - name: Update setup-cli - uses: actions/github-script@v6 + uses: actions/github-script@v7 with: github-token: ${{ secrets.DECO_GITHUB_TOKEN }} script: | @@ -87,7 +87,7 @@ jobs: echo "VERSION=${VERSION:1}" >> $GITHUB_ENV - name: Update homebrew-tap - uses: actions/github-script@v6 + uses: actions/github-script@v7 with: github-token: ${{ secrets.DECO_GITHUB_TOKEN }} script: | @@ -124,7 +124,7 @@ jobs: echo "VERSION=${VERSION:1}" >> $GITHUB_ENV - name: Update CLI version in the VSCode extension - uses: actions/github-script@v6 + uses: actions/github-script@v7 with: github-token: ${{ secrets.DECO_GITHUB_TOKEN }} script: | diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 3f0bdb2c..76395c9a 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -95,7 +95,7 @@ checksum: algorithm: sha256 snapshot: - name_template: '{{ incpatch .Version }}-dev+{{ .ShortCommit }}' + version_template: '{{ incpatch .Version }}-dev+{{ .ShortCommit }}' changelog: sort: asc diff --git a/CHANGELOG.md b/CHANGELOG.md index f31bb10b..e5b6496b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,119 @@ # Version changelog +## [Release] Release v0.234.0 + +Bundles: + * Do not execute build on bundle destroy ([#1882](https://github.com/databricks/cli/pull/1882)). + * Add support for non-Python ipynb notebooks to DABs ([#1827](https://github.com/databricks/cli/pull/1827)). + +API Changes: + * Added `databricks credentials` command group. + * Changed `databricks lakeview create` command with new required argument order. + +OpenAPI commit d25296d2f4aa7bd6195c816fdf82e0f960f775da (2024-11-07) +Dependency updates: + * Upgrade TF provider to 1.58.0 ([#1900](https://github.com/databricks/cli/pull/1900)). + * Bump golang.org/x/sync from 0.8.0 to 0.9.0 ([#1892](https://github.com/databricks/cli/pull/1892)). + * Bump golang.org/x/text from 0.19.0 to 0.20.0 ([#1893](https://github.com/databricks/cli/pull/1893)). + * Bump golang.org/x/mod from 0.21.0 to 0.22.0 ([#1895](https://github.com/databricks/cli/pull/1895)). + * Bump golang.org/x/oauth2 from 0.23.0 to 0.24.0 ([#1894](https://github.com/databricks/cli/pull/1894)). + * Bump github.com/databricks/databricks-sdk-go from 0.49.0 to 0.51.0 ([#1878](https://github.com/databricks/cli/pull/1878)). + +## [Release] Release v0.233.0 + +CLI: + * Clean host URL in the `auth login` command ([#1879](https://github.com/databricks/cli/pull/1879)). + +Bundles: + * Fix bundle run when run interactively ([#1880](https://github.com/databricks/cli/pull/1880)). + * Fix relative path resolution for dashboards on Windows ([#1881](https://github.com/databricks/cli/pull/1881)). + +Internal: + * Address goreleaser deprecation warning ([#1872](https://github.com/databricks/cli/pull/1872)). + * Update actions/github-script to v7 ([#1873](https://github.com/databricks/cli/pull/1873)). + * Use Go 1.23 ([#1871](https://github.com/databricks/cli/pull/1871)). 
+ * [Internal] Always write message for manual integration test trigger ([#1874](https://github.com/databricks/cli/pull/1874)). + * Add `cmd-exec-id` to user agent ([#1808](https://github.com/databricks/cli/pull/1808)). + * Added E2E test to run Python wheels on interactive cluster created in bundle ([#1864](https://github.com/databricks/cli/pull/1864)). + + +Dependency updates: + * Bump github.com/hashicorp/terraform-json from 0.22.1 to 0.23.0 ([#1877](https://github.com/databricks/cli/pull/1877)). + +## [Release] Release v0.232.1 + +This patch release fixes the following error observed when deploying to /Shared root folder +"Error: Path (/Shared/.bundle/.../resources) doesn't exist" + +Bundles: + * Fixed adding /Workspace prefix for resource paths ([#1866](https://github.com/databricks/cli/pull/1866)). + + +## [Release] Release v0.232.0 + +**New features for Databricks Asset Bundles:** + +This release adds support for managing AI/BI dashboards as part of your bundle configuration. The `bundle generate` command is updated to support producing dashboard bundle configuration as well as a serialized JSON representation of the dashboard. +You can find an example configuration and walkthrough at https://github.com/databricks/bundle-examples/tree/main/knowledge_base/dashboard_nyc_taxi + +CLI: + * Add privacy notice to README ([#1841](https://github.com/databricks/cli/pull/1841)). + +Bundles: + * Add support for AI/BI dashboards ([#1743](https://github.com/databricks/cli/pull/1743)). + * Added validator for folder permissions ([#1824](https://github.com/databricks/cli/pull/1824)). + * Add bundle generate variant for dashboards ([#1847](https://github.com/databricks/cli/pull/1847)). + * Use SetPermissions instead of UpdatePermissions when setting folder permissions based on top-level ones ([#1822](https://github.com/databricks/cli/pull/1822)). + +Internal: + * Attempt to reduce test flakiness on Windows ([#1845](https://github.com/databricks/cli/pull/1845)). + * Reuse resource resolution code for the run command ([#1858](https://github.com/databricks/cli/pull/1858)). + * [Internal] Automatically trigger integration tests on PR ([#1857](https://github.com/databricks/cli/pull/1857)). + * [Internal] Add test instructions for external contributors ([#1863](https://github.com/databricks/cli/pull/1863)). + * Add `libs/dyn/jsonsaver` ([#1862](https://github.com/databricks/cli/pull/1862)). + + +Dependency updates: + * Bump github.com/fatih/color from 1.17.0 to 1.18.0 ([#1861](https://github.com/databricks/cli/pull/1861)). + +## [Release] Release v0.231.0 + +CLI: + * Added JSON input validation for CLI commands ([#1771](https://github.com/databricks/cli/pull/1771)). + * Support Git worktrees for `sync` ([#1831](https://github.com/databricks/cli/pull/1831)). + +Bundles: + * Add `bundle summary` to display URLs for deployed resources ([#1731](https://github.com/databricks/cli/pull/1731)). + * Added a warning when incorrect permissions used for `/Workspace/Shared` bundle root ([#1821](https://github.com/databricks/cli/pull/1821)). + * Show actionable errors for collaborative deployment scenarios ([#1386](https://github.com/databricks/cli/pull/1386)). + * Fix path to repository-wide exclude file ([#1837](https://github.com/databricks/cli/pull/1837)). + * Fixed typo in converting cluster permissions ([#1826](https://github.com/databricks/cli/pull/1826)). + * Ignore metastore permission error during template generation ([#1819](https://github.com/databricks/cli/pull/1819)). 
+ * Handle normalization of `dyn.KindTime` into an any type ([#1836](https://github.com/databricks/cli/pull/1836)). + * Added support for pip options in environment dependencies ([#1842](https://github.com/databricks/cli/pull/1842)). + * Fix race condition when restarting continuous jobs ([#1849](https://github.com/databricks/cli/pull/1849)). + * Fix pipeline in default-python template not working for certain workspaces ([#1854](https://github.com/databricks/cli/pull/1854)). + * Add "output" flag to the bundle sync command ([#1853](https://github.com/databricks/cli/pull/1853)). + +Internal: + * Move utility functions dealing with IAM to libs/iamutil ([#1820](https://github.com/databricks/cli/pull/1820)). + * Remove unused `IS_OWNER` constant ([#1823](https://github.com/databricks/cli/pull/1823)). + * Assert SDK version is consistent in the CLI generation process ([#1814](https://github.com/databricks/cli/pull/1814)). + * Fixed unmarshalling json input into `interface{}` type ([#1832](https://github.com/databricks/cli/pull/1832)). + * Fix `TestAccFsMkdirWhenFileExistsAtPath` in isolated Azure environments ([#1833](https://github.com/databricks/cli/pull/1833)). + * Add behavioral tests for examples from the YAML spec ([#1835](https://github.com/databricks/cli/pull/1835)). + * Remove Terraform conversion function that's no longer used ([#1840](https://github.com/databricks/cli/pull/1840)). + * Encode assumptions about the dashboards API in a test ([#1839](https://github.com/databricks/cli/pull/1839)). + * Add script to make testing of code on branches easier ([#1844](https://github.com/databricks/cli/pull/1844)). + +API Changes: + * Added `databricks disable-legacy-dbfs` command group. + +OpenAPI commit cf9c61453990df0f9453670f2fe68e1b128647a2 (2024-10-14) +Dependency updates: + * Upgrade TF provider to 1.54.0 ([#1852](https://github.com/databricks/cli/pull/1852)). + * Bump github.com/databricks/databricks-sdk-go from 0.48.0 to 0.49.0 ([#1843](https://github.com/databricks/cli/pull/1843)). + ## [Release] Release v0.230.0 Notable changes for Databricks Asset Bundles: diff --git a/README.md b/README.md index 51780d0f..3c238702 100644 --- a/README.md +++ b/README.md @@ -35,3 +35,6 @@ docker run -e DATABRICKS_HOST=$YOUR_HOST_URL -e DATABRICKS_TOKEN=$YOUR_TOKEN ghc This CLI follows the Databricks Unified Authentication principles. You can find a detailed description at https://github.com/databricks/databricks-sdk-go#authentication. + +## Privacy Notice +Databricks CLI use is subject to the [Databricks License](https://github.com/databricks/cli/blob/main/LICENSE) and [Databricks Privacy Notice](https://www.databricks.com/legal/privacynotice), including any Usage Data provisions. diff --git a/bundle/config/generate/dashboard.go b/bundle/config/generate/dashboard.go new file mode 100644 index 00000000..46014080 --- /dev/null +++ b/bundle/config/generate/dashboard.go @@ -0,0 +1,18 @@ +package generate + +import ( + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/databricks-sdk-go/service/dashboards" +) + +func ConvertDashboardToValue(dashboard *dashboards.Dashboard, filePath string) (dyn.Value, error) { + // The majority of fields of the dashboard struct are read-only. + // We copy the relevant fields manually. 
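+	// The synthetic dyn.Location line numbers below presumably just give these
+	// keys a stable order when the value is rendered back into configuration;
+	// they do not refer to positions in any real file.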
+	dv := map[string]dyn.Value{
+		"display_name": dyn.NewValue(dashboard.DisplayName, []dyn.Location{{Line: 1}}),
+		"warehouse_id": dyn.NewValue(dashboard.WarehouseId, []dyn.Location{{Line: 2}}),
+		"file_path":    dyn.NewValue(filePath, []dyn.Location{{Line: 3}}),
+	}
+
+	return dyn.V(dv), nil
+}
diff --git a/bundle/config/mutator/apply_presets.go b/bundle/config/mutator/apply_presets.go
index 1fd49206..59b8547b 100644
--- a/bundle/config/mutator/apply_presets.go
+++ b/bundle/config/mutator/apply_presets.go
@@ -212,6 +212,15 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos
 		}
 	}
 
+	// Dashboards: Prefix
+	for key, dashboard := range r.Dashboards {
+		if dashboard == nil || dashboard.Dashboard == nil {
+			diags = diags.Extend(diag.Errorf("dashboard %s is not defined", key))
+			continue
+		}
+		dashboard.DisplayName = prefix + dashboard.DisplayName
+	}
+
 	return diags
 }
 
diff --git a/bundle/config/mutator/configure_dashboard_defaults.go b/bundle/config/mutator/configure_dashboard_defaults.go
new file mode 100644
index 00000000..36ec279d
--- /dev/null
+++ b/bundle/config/mutator/configure_dashboard_defaults.go
@@ -0,0 +1,70 @@
+package mutator
+
+import (
+	"context"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
+)
+
+type configureDashboardDefaults struct{}
+
+func ConfigureDashboardDefaults() bundle.Mutator {
+	return &configureDashboardDefaults{}
+}
+
+func (m *configureDashboardDefaults) Name() string {
+	return "ConfigureDashboardDefaults"
+}
+
+func (m *configureDashboardDefaults) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	var diags diag.Diagnostics
+
+	pattern := dyn.NewPattern(
+		dyn.Key("resources"),
+		dyn.Key("dashboards"),
+		dyn.AnyKey(),
+	)
+
+	// Configure defaults for all dashboards.
+	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
+		return dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
+			var err error
+			v, err = setIfNotExists(v, dyn.NewPath(dyn.Key("parent_path")), dyn.V(b.Config.Workspace.ResourcePath))
+			if err != nil {
+				return dyn.InvalidValue, err
+			}
+			v, err = setIfNotExists(v, dyn.NewPath(dyn.Key("embed_credentials")), dyn.V(false))
+			if err != nil {
+				return dyn.InvalidValue, err
+			}
+			return v, nil
+		})
+	})
+
+	diags = diags.Extend(diag.FromErr(err))
+	return diags
+}
+
+func setIfNotExists(v dyn.Value, path dyn.Path, defaultValue dyn.Value) (dyn.Value, error) {
+	// Get the field at the specified path (if set).
+	_, err := dyn.GetByPath(v, path)
+	switch {
+	case dyn.IsNoSuchKeyError(err):
+		// OK, we'll set the default value.
+		break
+	case dyn.IsCannotTraverseNilError(err):
+		// Cannot traverse the value, skip it.
+		return v, nil
+	case err == nil:
+		// The field is set, skip it.
+		return v, nil
+	default:
+		// Return the error.
+		return v, err
+	}
+
+	// Set the field at the specified path.
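+	// For example (hypothetical values): with v = {"a": 1} and path "b", the
+	// result is {"a": 1, "b": defaultValue}; with path "a" the switch above
+	// would already have returned v unchanged.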
+	return dyn.SetByPath(v, path, defaultValue)
+}
diff --git a/bundle/config/mutator/configure_dashboard_defaults_test.go b/bundle/config/mutator/configure_dashboard_defaults_test.go
new file mode 100644
index 00000000..2234f9a7
--- /dev/null
+++ b/bundle/config/mutator/configure_dashboard_defaults_test.go
@@ -0,0 +1,130 @@
+package mutator_test
+
+import (
+	"context"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/mutator"
+	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/cli/bundle/internal/bundletest"
+	"github.com/databricks/cli/libs/dyn"
+	"github.com/databricks/databricks-sdk-go/service/dashboards"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestConfigureDashboardDefaultsParentPath(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Workspace: config.Workspace{
+				ResourcePath: "/foo/bar",
+			},
+			Resources: config.Resources{
+				Dashboards: map[string]*resources.Dashboard{
+					"d1": {
+						// Empty string is skipped.
+						// See below for how it is set.
+						Dashboard: &dashboards.Dashboard{
+							ParentPath: "",
+						},
+					},
+					"d2": {
+						// Non-empty string is skipped.
+						Dashboard: &dashboards.Dashboard{
+							ParentPath: "already-set",
+						},
+					},
+					"d3": {
+						// No parent path set.
+					},
+					"d4": nil,
+				},
+			},
+		},
+	}
+
+	// We can't set an empty string in the typed configuration.
+	// Do it on the dyn.Value directly.
+	bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
+		return dyn.Set(v, "resources.dashboards.d1.parent_path", dyn.V(""))
+	})
+
+	diags := bundle.Apply(context.Background(), b, mutator.ConfigureDashboardDefaults())
+	require.NoError(t, diags.Error())
+
+	var v dyn.Value
+	var err error
+
+	// Set to empty string; unchanged.
+	v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d1.parent_path")
+	if assert.NoError(t, err) {
+		assert.Equal(t, "", v.MustString())
+	}
+
+	// Set to "already-set"; unchanged.
+	v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d2.parent_path")
+	if assert.NoError(t, err) {
+		assert.Equal(t, "already-set", v.MustString())
+	}
+
+	// Not set; now set to the workspace resource path.
+	v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d3.parent_path")
+	if assert.NoError(t, err) {
+		assert.Equal(t, "/foo/bar", v.MustString())
+	}
+
+	// No valid dashboard; no change.
+	_, err = dyn.Get(b.Config.Value(), "resources.dashboards.d4.parent_path")
+	assert.True(t, dyn.IsCannotTraverseNilError(err))
+}
+
+func TestConfigureDashboardDefaultsEmbedCredentials(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{
+				Dashboards: map[string]*resources.Dashboard{
+					"d1": {
+						EmbedCredentials: true,
+					},
+					"d2": {
+						EmbedCredentials: false,
+					},
+					"d3": {
+						// No embed credentials set.
+					},
+					"d4": nil,
+				},
+			},
+		},
+	}
+
+	diags := bundle.Apply(context.Background(), b, mutator.ConfigureDashboardDefaults())
+	require.NoError(t, diags.Error())
+
+	var v dyn.Value
+	var err error
+
+	// Set to true; still true.
+	v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d1.embed_credentials")
+	if assert.NoError(t, err) {
+		assert.Equal(t, true, v.MustBool())
+	}
+
+	// Set to false; still false.
+	v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d2.embed_credentials")
+	if assert.NoError(t, err) {
+		assert.Equal(t, false, v.MustBool())
+	}
+
+	// Not set; now false.
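+	// (Without the mutator, this key would not exist in the dyn.Value at all;
+	// a successful Get here shows the default was written explicitly.)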
+ v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d3.embed_credentials") + if assert.NoError(t, err) { + assert.Equal(t, false, v.MustBool()) + } + + // No valid dashboard; no change. + _, err = dyn.Get(b.Config.Value(), "resources.dashboards.d4.embed_credentials") + assert.True(t, dyn.IsCannotTraverseNilError(err)) +} diff --git a/bundle/config/mutator/configure_wsfs.go b/bundle/config/mutator/configure_wsfs.go index 1d1bec58..110e1a38 100644 --- a/bundle/config/mutator/configure_wsfs.go +++ b/bundle/config/mutator/configure_wsfs.go @@ -5,14 +5,12 @@ import ( "strings" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/dbr" "github.com/databricks/cli/libs/diag" - "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/vfs" ) -const envDatabricksRuntimeVersion = "DATABRICKS_RUNTIME_VERSION" - type configureWSFS struct{} func ConfigureWSFS() bundle.Mutator { @@ -32,7 +30,7 @@ func (m *configureWSFS) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno } // The executable must be running on DBR. - if _, ok := env.Lookup(ctx, envDatabricksRuntimeVersion); !ok { + if !dbr.RunsOnRuntime(ctx) { return nil } diff --git a/bundle/config/mutator/configure_wsfs_test.go b/bundle/config/mutator/configure_wsfs_test.go new file mode 100644 index 00000000..6f76293e --- /dev/null +++ b/bundle/config/mutator/configure_wsfs_test.go @@ -0,0 +1,65 @@ +package mutator_test + +import ( + "context" + "runtime" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/libs/dbr" + "github.com/databricks/cli/libs/vfs" + "github.com/databricks/databricks-sdk-go/config" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/stretchr/testify/assert" +) + +func mockBundleForConfigureWSFS(t *testing.T, syncRootPath string) *bundle.Bundle { + // The native path of the sync root on Windows will never match the /Workspace prefix, + // so the test case for nominal behavior will always fail. 
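+	// (ConfigureWSFS only swaps the sync root for paths under /Workspace, and
+	// a native Windows path such as C:\Users\... can never carry that prefix.)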
+ if runtime.GOOS == "windows" { + t.Skip("this test is not applicable on Windows") + } + + b := &bundle.Bundle{ + SyncRoot: vfs.MustNew(syncRootPath), + } + + w := mocks.NewMockWorkspaceClient(t) + w.WorkspaceClient.Config = &config.Config{} + b.SetWorkpaceClient(w.WorkspaceClient) + + return b +} + +func TestConfigureWSFS_SkipsIfNotWorkspacePrefix(t *testing.T) { + b := mockBundleForConfigureWSFS(t, "/foo") + originalSyncRoot := b.SyncRoot + + ctx := context.Background() + diags := bundle.Apply(ctx, b, mutator.ConfigureWSFS()) + assert.Empty(t, diags) + assert.Equal(t, originalSyncRoot, b.SyncRoot) +} + +func TestConfigureWSFS_SkipsIfNotRunningOnRuntime(t *testing.T) { + b := mockBundleForConfigureWSFS(t, "/Workspace/foo") + originalSyncRoot := b.SyncRoot + + ctx := context.Background() + ctx = dbr.MockRuntime(ctx, false) + diags := bundle.Apply(ctx, b, mutator.ConfigureWSFS()) + assert.Empty(t, diags) + assert.Equal(t, originalSyncRoot, b.SyncRoot) +} + +func TestConfigureWSFS_SwapSyncRoot(t *testing.T) { + b := mockBundleForConfigureWSFS(t, "/Workspace/foo") + originalSyncRoot := b.SyncRoot + + ctx := context.Background() + ctx = dbr.MockRuntime(ctx, true) + diags := bundle.Apply(ctx, b, mutator.ConfigureWSFS()) + assert.Empty(t, diags) + assert.NotEqual(t, originalSyncRoot, b.SyncRoot) +} diff --git a/bundle/config/mutator/initialize_urls_test.go b/bundle/config/mutator/initialize_urls_test.go index 71cc153a..ec4e790c 100644 --- a/bundle/config/mutator/initialize_urls_test.go +++ b/bundle/config/mutator/initialize_urls_test.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/dashboards" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/ml" "github.com/databricks/databricks-sdk-go/service/pipelines" @@ -64,9 +65,8 @@ func TestInitializeURLs(t *testing.T) { }, QualityMonitors: map[string]*resources.QualityMonitor{ "qualityMonitor1": { - CreateMonitor: &catalog.CreateMonitor{ - TableName: "catalog.schema.qualityMonitor1", - }, + TableName: "catalog.schema.qualityMonitor1", + CreateMonitor: &catalog.CreateMonitor{}, }, }, Schemas: map[string]*resources.Schema{ @@ -85,6 +85,14 @@ func TestInitializeURLs(t *testing.T) { }, }, }, + Dashboards: map[string]*resources.Dashboard{ + "dashboard1": { + ID: "01ef8d56871e1d50ae30ce7375e42478", + Dashboard: &dashboards.Dashboard{ + DisplayName: "My special dashboard", + }, + }, + }, }, }, } @@ -99,6 +107,7 @@ func TestInitializeURLs(t *testing.T) { "qualityMonitor1": "https://mycompany.databricks.com/explore/data/catalog/schema/qualityMonitor1?o=123456", "schema1": "https://mycompany.databricks.com/explore/data/catalog/schema?o=123456", "cluster1": "https://mycompany.databricks.com/compute/clusters/1017-103929-vlr7jzcf?o=123456", + "dashboard1": "https://mycompany.databricks.com/dashboardsv3/01ef8d56871e1d50ae30ce7375e42478/published?o=123456", } initializeForWorkspace(b, "123456", "https://mycompany.databricks.com/") diff --git a/bundle/config/mutator/prepend_workspace_prefix.go b/bundle/config/mutator/prepend_workspace_prefix.go index dd467344..e0be2572 100644 --- a/bundle/config/mutator/prepend_workspace_prefix.go +++ b/bundle/config/mutator/prepend_workspace_prefix.go @@ -32,6 +32,7 @@ func (m *prependWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) di dyn.NewPattern(dyn.Key("workspace"), 
dyn.Key("file_path")),
 		dyn.NewPattern(dyn.Key("workspace"), dyn.Key("artifact_path")),
 		dyn.NewPattern(dyn.Key("workspace"), dyn.Key("state_path")),
+		dyn.NewPattern(dyn.Key("workspace"), dyn.Key("resource_path")),
 	}
 
 	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
@@ -43,6 +44,11 @@ func (m *prependWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) di
 			return dyn.InvalidValue, fmt.Errorf("expected string, got %s", v.Kind())
 		}
 
+		// Skip prefixing if the path does not start with /; it might be a variable reference or something else.
+		if !strings.HasPrefix(path, "/") {
+			return pv, nil
+		}
+
 		for _, prefix := range skipPrefixes {
 			if strings.HasPrefix(path, prefix) {
 				return pv, nil
diff --git a/bundle/config/mutator/prepend_workspace_prefix_test.go b/bundle/config/mutator/prepend_workspace_prefix_test.go
index 287c694d..31393e6b 100644
--- a/bundle/config/mutator/prepend_workspace_prefix_test.go
+++ b/bundle/config/mutator/prepend_workspace_prefix_test.go
@@ -31,6 +31,14 @@ func TestPrependWorkspacePrefix(t *testing.T) {
 			path:     "/Volumes/Users/test",
 			expected: "/Volumes/Users/test",
 		},
+		{
+			path:     "~/test",
+			expected: "~/test",
+		},
+		{
+			path:     "${workspace.file_path}/test",
+			expected: "${workspace.file_path}/test",
+		},
 	}
 
 	for _, tc := range testCases {
@@ -41,6 +49,7 @@ func TestPrependWorkspacePrefix(t *testing.T) {
 					ArtifactPath: tc.path,
 					FilePath:     tc.path,
 					StatePath:    tc.path,
+					ResourcePath: tc.path,
 				},
 			},
 		}
@@ -51,6 +60,7 @@ func TestPrependWorkspacePrefix(t *testing.T) {
 		require.Equal(t, tc.expected, b.Config.Workspace.ArtifactPath)
 		require.Equal(t, tc.expected, b.Config.Workspace.FilePath)
 		require.Equal(t, tc.expected, b.Config.Workspace.StatePath)
+		require.Equal(t, tc.expected, b.Config.Workspace.ResourcePath)
 	}
 }
 
@@ -76,4 +86,5 @@ func TestPrependWorkspaceForDefaultConfig(t *testing.T) {
 	require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/artifacts", b.Config.Workspace.ArtifactPath)
 	require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/files", b.Config.Workspace.FilePath)
 	require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/state", b.Config.Workspace.StatePath)
+	require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/resources", b.Config.Workspace.ResourcePath)
 }
diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go
index b0eb57ee..4135d5fd 100644
--- a/bundle/config/mutator/process_target_mode_test.go
+++ b/bundle/config/mutator/process_target_mode_test.go
@@ -14,6 +14,7 @@ import (
 	sdkconfig "github.com/databricks/databricks-sdk-go/config"
 	"github.com/databricks/databricks-sdk-go/service/catalog"
 	"github.com/databricks/databricks-sdk-go/service/compute"
+	"github.com/databricks/databricks-sdk-go/service/dashboards"
 	"github.com/databricks/databricks-sdk-go/service/iam"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/databricks/databricks-sdk-go/service/ml"
@@ -101,16 +102,23 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
 				"registeredmodel1": {CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{Name: "registeredmodel1"}},
 			},
 			QualityMonitors: map[string]*resources.QualityMonitor{
-				"qualityMonitor1": {CreateMonitor: &catalog.CreateMonitor{TableName: "qualityMonitor1"}},
-				"qualityMonitor2": {
+				"qualityMonitor1": {
+					TableName: "qualityMonitor1",
 					CreateMonitor: &catalog.CreateMonitor{
-						TableName: "qualityMonitor2",
-						Schedule:  &catalog.MonitorCronSchedule{},
+						OutputSchemaName: "catalog.schema",
+					},
+				},
+
"qualityMonitor2": { + TableName: "qualityMonitor2", + CreateMonitor: &catalog.CreateMonitor{ + OutputSchemaName: "catalog.schema", + Schedule: &catalog.MonitorCronSchedule{}, }, }, "qualityMonitor3": { + TableName: "qualityMonitor3", CreateMonitor: &catalog.CreateMonitor{ - TableName: "qualityMonitor3", + OutputSchemaName: "catalog.schema", Schedule: &catalog.MonitorCronSchedule{ PauseStatus: catalog.MonitorCronSchedulePauseStatusUnpaused, }, @@ -123,6 +131,13 @@ func mockBundle(mode config.Mode) *bundle.Bundle { Clusters: map[string]*resources.Cluster{ "cluster1": {ClusterSpec: &compute.ClusterSpec{ClusterName: "cluster1", SparkVersion: "13.2.x", NumWorkers: 1}}, }, + Dashboards: map[string]*resources.Dashboard{ + "dashboard1": { + Dashboard: &dashboards.Dashboard{ + DisplayName: "dashboard1", + }, + }, + }, }, }, // Use AWS implementation for testing. @@ -184,6 +199,9 @@ func TestProcessTargetModeDevelopment(t *testing.T) { // Clusters assert.Equal(t, "[dev lennart] cluster1", b.Config.Resources.Clusters["cluster1"].ClusterName) + + // Dashboards + assert.Equal(t, "[dev lennart] dashboard1", b.Config.Resources.Dashboards["dashboard1"].DisplayName) } func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) { @@ -272,6 +290,7 @@ func TestValidateDevelopmentMode(t *testing.T) { b.Config.Workspace.StatePath = "/Users/lennart@company.com/.bundle/x/y/state" b.Config.Workspace.FilePath = "/Users/lennart@company.com/.bundle/x/y/files" b.Config.Workspace.ArtifactPath = "/Users/lennart@company.com/.bundle/x/y/artifacts" + b.Config.Workspace.ResourcePath = "/Users/lennart@company.com/.bundle/x/y/resources" diags = validateDevelopmentMode(b) require.NoError(t, diags.Error()) } @@ -300,6 +319,7 @@ func TestProcessTargetModeProduction(t *testing.T) { b.Config.Workspace.StatePath = "/Shared/.bundle/x/y/state" b.Config.Workspace.ArtifactPath = "/Shared/.bundle/x/y/artifacts" b.Config.Workspace.FilePath = "/Shared/.bundle/x/y/files" + b.Config.Workspace.ResourcePath = "/Shared/.bundle/x/y/resources" diags = validateProductionMode(context.Background(), b, false) require.ErrorContains(t, diags.Error(), "production") diff --git a/bundle/config/mutator/run_as.go b/bundle/config/mutator/run_as.go index 6b3069d4..0ca71e28 100644 --- a/bundle/config/mutator/run_as.go +++ b/bundle/config/mutator/run_as.go @@ -110,6 +110,16 @@ func validateRunAs(b *bundle.Bundle) diag.Diagnostics { )) } + // Dashboards do not support run_as in the API. + if len(b.Config.Resources.Dashboards) > 0 { + diags = diags.Extend(reportRunAsNotSupported( + "dashboards", + b.Config.GetLocation("resources.dashboards"), + b.Config.Workspace.CurrentUser.UserName, + identity, + )) + } + return diags } diff --git a/bundle/config/mutator/run_as_test.go b/bundle/config/mutator/run_as_test.go index 8076b82f..acb6c3a4 100644 --- a/bundle/config/mutator/run_as_test.go +++ b/bundle/config/mutator/run_as_test.go @@ -33,6 +33,7 @@ func allResourceTypes(t *testing.T) []string { // also update this check when adding a new resource require.Equal(t, []string{ "clusters", + "dashboards", "experiments", "jobs", "model_serving_endpoints", @@ -188,6 +189,7 @@ func TestRunAsErrorForUnsupportedResources(t *testing.T) { Config: *r, } diags := bundle.Apply(context.Background(), b, SetRunAs()) + require.Error(t, diags.Error()) assert.Contains(t, diags.Error().Error(), "do not support a setting a run_as user that is different from the owner.\n"+ "Current identity: alice. 
Run as identity: bob.\n"+ "See https://docs.databricks.com/dev-tools/bundles/run-as.html to learn more about the run_as property.", rt) diff --git a/bundle/config/mutator/translate_paths.go b/bundle/config/mutator/translate_paths.go index 5f22570e..321fa5b3 100644 --- a/bundle/config/mutator/translate_paths.go +++ b/bundle/config/mutator/translate_paths.go @@ -162,6 +162,20 @@ func (t *translateContext) translateNoOp(literal, localFullPath, localRelPath, r return localRelPath, nil } +func (t *translateContext) retainLocalAbsoluteFilePath(literal, localFullPath, localRelPath, remotePath string) (string, error) { + info, err := t.b.SyncRoot.Stat(filepath.ToSlash(localRelPath)) + if errors.Is(err, fs.ErrNotExist) { + return "", fmt.Errorf("file %s not found", literal) + } + if err != nil { + return "", fmt.Errorf("unable to determine if %s is a file: %w", localFullPath, err) + } + if info.IsDir() { + return "", fmt.Errorf("expected %s to be a file but found a directory", literal) + } + return localFullPath, nil +} + func (t *translateContext) translateNoOpWithPrefix(literal, localFullPath, localRelPath, remotePath string) (string, error) { if !strings.HasPrefix(localRelPath, ".") { localRelPath = "." + string(filepath.Separator) + localRelPath @@ -215,6 +229,7 @@ func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnos t.applyJobTranslations, t.applyPipelineTranslations, t.applyArtifactTranslations, + t.applyDashboardTranslations, } { v, err = fn(v) if err != nil { diff --git a/bundle/config/mutator/translate_paths_dashboards.go b/bundle/config/mutator/translate_paths_dashboards.go new file mode 100644 index 00000000..93822a59 --- /dev/null +++ b/bundle/config/mutator/translate_paths_dashboards.go @@ -0,0 +1,28 @@ +package mutator + +import ( + "fmt" + + "github.com/databricks/cli/libs/dyn" +) + +func (t *translateContext) applyDashboardTranslations(v dyn.Value) (dyn.Value, error) { + // Convert the `file_path` field to a local absolute path. + // We load the file at this path and use its contents for the dashboard contents. 
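+	// The pattern below matches every value at resources.dashboards.<name>.file_path;
+	// the callback then receives each matched value v together with its full path p,
+	// from which the dashboard key is recovered as p[2] (e.g. "d1" for a hypothetical
+	// resources.dashboards.d1.file_path).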
+ pattern := dyn.NewPattern( + dyn.Key("resources"), + dyn.Key("dashboards"), + dyn.AnyKey(), + dyn.Key("file_path"), + ) + + return dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + key := p[2].Key() + dir, err := v.Location().Directory() + if err != nil { + return dyn.InvalidValue, fmt.Errorf("unable to determine directory for dashboard %s: %w", key, err) + } + + return t.rewriteRelativeTo(p, v, t.retainLocalAbsoluteFilePath, dir, "") + }) +} diff --git a/bundle/config/mutator/translate_paths_dashboards_test.go b/bundle/config/mutator/translate_paths_dashboards_test.go new file mode 100644 index 00000000..5e4e69f5 --- /dev/null +++ b/bundle/config/mutator/translate_paths_dashboards_test.go @@ -0,0 +1,54 @@ +package mutator_test + +import ( + "context" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/bundletest" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/vfs" + "github.com/databricks/databricks-sdk-go/service/dashboards" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTranslatePathsDashboards_FilePathRelativeSubDirectory(t *testing.T) { + dir := t.TempDir() + touchEmptyFile(t, filepath.Join(dir, "src", "my_dashboard.lvdash.json")) + + b := &bundle.Bundle{ + SyncRootPath: dir, + SyncRoot: vfs.MustNew(dir), + Config: config.Root{ + Resources: config.Resources{ + Dashboards: map[string]*resources.Dashboard{ + "dashboard": { + Dashboard: &dashboards.Dashboard{ + DisplayName: "My Dashboard", + }, + FilePath: "../src/my_dashboard.lvdash.json", + }, + }, + }, + }, + } + + bundletest.SetLocation(b, "resources.dashboards", []dyn.Location{{ + File: filepath.Join(dir, "resources/dashboard.yml"), + }}) + + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + require.NoError(t, diags.Error()) + + // Assert that the file path for the dashboard has been converted to its local absolute path. 
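+	// (The configured "../src/my_dashboard.lvdash.json" is resolved relative to
+	// resources/dashboard.yml, so the cleaned result is <dir>/src/my_dashboard.lvdash.json.)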
+ assert.Equal( + t, + filepath.Join(dir, "src", "my_dashboard.lvdash.json"), + b.Config.Resources.Dashboards["dashboard"].FilePath, + ) +} diff --git a/bundle/config/resources.go b/bundle/config/resources.go index 9513369e..0affb6ef 100644 --- a/bundle/config/resources.go +++ b/bundle/config/resources.go @@ -21,6 +21,7 @@ type Resources struct { QualityMonitors map[string]*resources.QualityMonitor `json:"quality_monitors,omitempty"` Schemas map[string]*resources.Schema `json:"schemas,omitempty"` Clusters map[string]*resources.Cluster `json:"clusters,omitempty"` + Dashboards map[string]*resources.Dashboard `json:"dashboards,omitempty"` } type ConfigResource interface { @@ -77,6 +78,7 @@ func (r *Resources) AllResources() []ResourceGroup { collectResourceMap(descriptions["quality_monitors"], r.QualityMonitors), collectResourceMap(descriptions["schemas"], r.Schemas), collectResourceMap(descriptions["clusters"], r.Clusters), + collectResourceMap(descriptions["dashboards"], r.Dashboards), } } @@ -175,5 +177,11 @@ func SupportedResources() map[string]ResourceDescription { SingularTitle: "Cluster", PluralTitle: "Clusters", }, + "dashboards": { + SingularName: "dashboard", + PluralName: "dashboards", + SingularTitle: "Dashboard", + PluralTitle: "Dashboards", + }, } } diff --git a/bundle/config/resources/dashboard.go b/bundle/config/resources/dashboard.go new file mode 100644 index 00000000..724b0339 --- /dev/null +++ b/bundle/config/resources/dashboard.go @@ -0,0 +1,81 @@ +package resources + +import ( + "context" + "fmt" + "net/url" + + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/marshal" + "github.com/databricks/databricks-sdk-go/service/dashboards" +) + +type Dashboard struct { + ID string `json:"id,omitempty" bundle:"readonly"` + Permissions []Permission `json:"permissions,omitempty"` + ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"` + URL string `json:"url,omitempty" bundle:"internal"` + + *dashboards.Dashboard + + // ========================= + // === Additional fields === + // ========================= + + // SerializedDashboard holds the contents of the dashboard in serialized JSON form. + // We override the field's type from the SDK struct here to allow for inlining as YAML. + // If the value is a string, it is used as is. + // If it is not a string, its contents is marshalled as JSON. + SerializedDashboard any `json:"serialized_dashboard,omitempty"` + + // EmbedCredentials is a flag to indicate if the publisher's credentials should + // be embedded in the published dashboard. These embedded credentials will be used + // to execute the published dashboard's queries. + // + // Defaults to false if not set. + EmbedCredentials bool `json:"embed_credentials,omitempty"` + + // FilePath points to the local `.lvdash.json` file containing the dashboard definition. 
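+	// In bundle configuration this typically looks like (hypothetical path):
+	//
+	//	file_path: ./src/my_dashboard.lvdash.json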
+ FilePath string `json:"file_path,omitempty"` +} + +func (r *Dashboard) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, r) +} + +func (r Dashboard) MarshalJSON() ([]byte, error) { + return marshal.Marshal(r) +} + +func (*Dashboard) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { + _, err := w.Lakeview.Get(ctx, dashboards.GetDashboardRequest{ + DashboardId: id, + }) + if err != nil { + log.Debugf(ctx, "dashboard %s does not exist", id) + return false, err + } + return true, nil +} + +func (*Dashboard) TerraformResourceName() string { + return "databricks_dashboard" +} + +func (r *Dashboard) InitializeURL(baseURL url.URL) { + if r.ID == "" { + return + } + + baseURL.Path = fmt.Sprintf("dashboardsv3/%s/published", r.ID) + r.URL = baseURL.String() +} + +func (r *Dashboard) GetName() string { + return r.DisplayName +} + +func (r *Dashboard) GetURL() string { + return r.URL +} diff --git a/bundle/config/resources/permission.go b/bundle/config/resources/permission.go index fa2d8796..62e18a09 100644 --- a/bundle/config/resources/permission.go +++ b/bundle/config/resources/permission.go @@ -1,5 +1,7 @@ package resources +import "fmt" + // Permission holds the permission level setting for a single principal. // Multiple of these can be defined on any resource. type Permission struct { @@ -9,3 +11,19 @@ type Permission struct { ServicePrincipalName string `json:"service_principal_name,omitempty"` GroupName string `json:"group_name,omitempty"` } + +func (p Permission) String() string { + if p.UserName != "" { + return fmt.Sprintf("level: %s, user_name: %s", p.Level, p.UserName) + } + + if p.ServicePrincipalName != "" { + return fmt.Sprintf("level: %s, service_principal_name: %s", p.Level, p.ServicePrincipalName) + } + + if p.GroupName != "" { + return fmt.Sprintf("level: %s, group_name: %s", p.Level, p.GroupName) + } + + return fmt.Sprintf("level: %s", p.Level) +} diff --git a/bundle/config/resources/quality_monitor.go b/bundle/config/resources/quality_monitor.go index 3c823e62..30ec4f91 100644 --- a/bundle/config/resources/quality_monitor.go +++ b/bundle/config/resources/quality_monitor.go @@ -13,17 +13,15 @@ import ( ) type QualityMonitor struct { - // Represents the Input Arguments for Terraform and will get - // converted to a HCL representation for CRUD - *catalog.CreateMonitor - - // This represents the id which is the full name of the monitor - // (catalog_name.schema_name.table_name) that can be used - // as a reference in other resources. This value is returned by terraform. - ID string `json:"id,omitempty" bundle:"readonly"` - + ID string `json:"id,omitempty" bundle:"readonly"` ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"` URL string `json:"url,omitempty" bundle:"internal"` + + // The table name is a required field but not included as a JSON field in [catalog.CreateMonitor]. + TableName string `json:"table_name"` + + // This struct defines the creation payload for a monitor. 
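+	// Embedding it keeps its fields addressable at the top level of the
+	// resource in bundle configuration, alongside the hoisted `table_name`.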
+	*catalog.CreateMonitor
 }
 
 func (s *QualityMonitor) UnmarshalJSON(b []byte) error {
diff --git a/bundle/config/validate/folder_permissions.go b/bundle/config/validate/folder_permissions.go
new file mode 100644
index 00000000..505e82a1
--- /dev/null
+++ b/bundle/config/validate/folder_permissions.go
@@ -0,0 +1,106 @@
+package validate
+
+import (
+	"context"
+	"fmt"
+	"path"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/libraries"
+	"github.com/databricks/cli/bundle/paths"
+	"github.com/databricks/cli/bundle/permissions"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/databricks-sdk-go/apierr"
+	"github.com/databricks/databricks-sdk-go/service/workspace"
+	"golang.org/x/sync/errgroup"
+)
+
+type folderPermissions struct {
+}
+
+// Apply implements bundle.ReadOnlyMutator.
+func (f *folderPermissions) Apply(ctx context.Context, b bundle.ReadOnlyBundle) diag.Diagnostics {
+	if len(b.Config().Permissions) == 0 {
+		return nil
+	}
+
+	bundlePaths := paths.CollectUniqueWorkspacePathPrefixes(b.Config().Workspace)
+
+	var diags diag.Diagnostics
+	g, ctx := errgroup.WithContext(ctx)
+	results := make([]diag.Diagnostics, len(bundlePaths))
+	for i, p := range bundlePaths {
+		g.Go(func() error {
+			results[i] = checkFolderPermission(ctx, b, p)
+			return nil
+		})
+	}
+
+	if err := g.Wait(); err != nil {
+		return diag.FromErr(err)
+	}
+
+	for _, r := range results {
+		diags = diags.Extend(r)
+	}
+
+	return diags
+}
+
+func checkFolderPermission(ctx context.Context, b bundle.ReadOnlyBundle, folderPath string) diag.Diagnostics {
+	// If the folder is shared, we don't need to check its permissions here; they were already checked by another mutator.
+	if libraries.IsWorkspaceSharedPath(folderPath) {
+		return nil
+	}
+
+	w := b.WorkspaceClient().Workspace
+	obj, err := getClosestExistingObject(ctx, w, folderPath)
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	objPermissions, err := w.GetPermissions(ctx, workspace.GetWorkspaceObjectPermissionsRequest{
+		WorkspaceObjectId: fmt.Sprint(obj.ObjectId),
+		WorkspaceObjectType: "directories",
+	})
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	p := permissions.ObjectAclToResourcePermissions(folderPath, objPermissions.AccessControlList)
+	return p.Compare(b.Config().Permissions)
+}
+
+func getClosestExistingObject(ctx context.Context, w workspace.WorkspaceInterface, folderPath string) (*workspace.ObjectInfo, error) {
+	for {
+		obj, err := w.GetStatusByPath(ctx, folderPath)
+		if err == nil {
+			return obj, nil
+		}
+
+		if !apierr.IsMissing(err) {
+			return nil, err
+		}
+
+		parent := path.Dir(folderPath)
+		// If the parent is the same as the current folder, then we have reached the root.
+		if folderPath == parent {
+			break
+		}
+
+		folderPath = parent
+	}
+
+	return nil, fmt.Errorf("folder %s and its parent folders do not exist", folderPath)
+}
+
+// Name implements bundle.ReadOnlyMutator.
+func (f *folderPermissions) Name() string {
+	return "validate:folder_permissions"
+}
+
+// ValidateFolderPermissions validates that the permissions on the bundle's folders in the
+// workspace file system match the permissions in the top-level permissions section of the bundle.
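+// Permissions that apply to a folder but are not listed in the bundle produce warning
+// diagnostics; a root path whose parent folders do not exist at all produces an error.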
+func ValidateFolderPermissions() bundle.ReadOnlyMutator { + return &folderPermissions{} +} diff --git a/bundle/config/validate/folder_permissions_test.go b/bundle/config/validate/folder_permissions_test.go new file mode 100644 index 00000000..8e68c9fb --- /dev/null +++ b/bundle/config/validate/folder_permissions_test.go @@ -0,0 +1,208 @@ +package validate + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/permissions" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/workspace" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestFolderPermissionsInheritedWhenRootPathDoesNotExist(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + RootPath: "/Workspace/Users/foo@bar.com", + ArtifactPath: "/Workspace/Users/otherfoo@bar.com/artifacts", + FilePath: "/Workspace/Users/foo@bar.com/files", + StatePath: "/Workspace/Users/foo@bar.com/state", + ResourcePath: "/Workspace/Users/foo@bar.com/resources", + }, + Permissions: []resources.Permission{ + {Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"}, + }, + }, + } + m := mocks.NewMockWorkspaceClient(t) + api := m.GetMockWorkspaceAPI() + api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/otherfoo@bar.com/artifacts").Return(nil, &apierr.APIError{ + StatusCode: 404, + ErrorCode: "RESOURCE_DOES_NOT_EXIST", + }) + api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/otherfoo@bar.com").Return(nil, &apierr.APIError{ + StatusCode: 404, + ErrorCode: "RESOURCE_DOES_NOT_EXIST", + }) + api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/foo@bar.com").Return(nil, &apierr.APIError{ + StatusCode: 404, + ErrorCode: "RESOURCE_DOES_NOT_EXIST", + }) + api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users").Return(nil, &apierr.APIError{ + StatusCode: 404, + ErrorCode: "RESOURCE_DOES_NOT_EXIST", + }) + api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace").Return(&workspace.ObjectInfo{ + ObjectId: 1234, + }, nil) + + api.EXPECT().GetPermissions(mock.Anything, workspace.GetWorkspaceObjectPermissionsRequest{ + WorkspaceObjectId: "1234", + WorkspaceObjectType: "directories", + }).Return(&workspace.WorkspaceObjectPermissions{ + ObjectId: "1234", + AccessControlList: []workspace.WorkspaceObjectAccessControlResponse{ + { + UserName: "foo@bar.com", + AllPermissions: []workspace.WorkspaceObjectPermission{ + {PermissionLevel: "CAN_MANAGE"}, + }, + }, + }, + }, nil) + + b.SetWorkpaceClient(m.WorkspaceClient) + rb := bundle.ReadOnly(b) + + diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions()) + require.Empty(t, diags) +} + +func TestValidateFolderPermissionsFailsOnMissingBundlePermission(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + RootPath: "/Workspace/Users/foo@bar.com", + ArtifactPath: "/Workspace/Users/foo@bar.com/artifacts", + FilePath: "/Workspace/Users/foo@bar.com/files", + StatePath: "/Workspace/Users/foo@bar.com/state", + ResourcePath: "/Workspace/Users/foo@bar.com/resources", + }, + Permissions: []resources.Permission{ + {Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"}, + }, + }, + } + m := mocks.NewMockWorkspaceClient(t) + api := m.GetMockWorkspaceAPI() 
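+	// All bundle paths share the same root prefix here; it exists, so permissions
+	// are checked directly against it without walking up to a parent folder.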
+ api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/foo@bar.com").Return(&workspace.ObjectInfo{ + ObjectId: 1234, + }, nil) + + api.EXPECT().GetPermissions(mock.Anything, workspace.GetWorkspaceObjectPermissionsRequest{ + WorkspaceObjectId: "1234", + WorkspaceObjectType: "directories", + }).Return(&workspace.WorkspaceObjectPermissions{ + ObjectId: "1234", + AccessControlList: []workspace.WorkspaceObjectAccessControlResponse{ + { + UserName: "foo@bar.com", + AllPermissions: []workspace.WorkspaceObjectPermission{ + {PermissionLevel: "CAN_MANAGE"}, + }, + }, + { + UserName: "foo2@bar.com", + AllPermissions: []workspace.WorkspaceObjectPermission{ + {PermissionLevel: "CAN_MANAGE"}, + }, + }, + }, + }, nil) + + b.SetWorkpaceClient(m.WorkspaceClient) + rb := bundle.ReadOnly(b) + + diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions()) + require.Len(t, diags, 1) + require.Equal(t, "untracked permissions apply to target workspace path", diags[0].Summary) + require.Equal(t, diag.Warning, diags[0].Severity) + require.Equal(t, "The following permissions apply to the workspace folder at \"/Workspace/Users/foo@bar.com\" but are not configured in the bundle:\n- level: CAN_MANAGE, user_name: foo2@bar.com\n", diags[0].Detail) +} + +func TestValidateFolderPermissionsFailsOnPermissionMismatch(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + RootPath: "/Workspace/Users/foo@bar.com", + ArtifactPath: "/Workspace/Users/foo@bar.com/artifacts", + FilePath: "/Workspace/Users/foo@bar.com/files", + StatePath: "/Workspace/Users/foo@bar.com/state", + ResourcePath: "/Workspace/Users/foo@bar.com/resources", + }, + Permissions: []resources.Permission{ + {Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"}, + }, + }, + } + m := mocks.NewMockWorkspaceClient(t) + api := m.GetMockWorkspaceAPI() + api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/foo@bar.com").Return(&workspace.ObjectInfo{ + ObjectId: 1234, + }, nil) + + api.EXPECT().GetPermissions(mock.Anything, workspace.GetWorkspaceObjectPermissionsRequest{ + WorkspaceObjectId: "1234", + WorkspaceObjectType: "directories", + }).Return(&workspace.WorkspaceObjectPermissions{ + ObjectId: "1234", + AccessControlList: []workspace.WorkspaceObjectAccessControlResponse{ + { + UserName: "foo2@bar.com", + AllPermissions: []workspace.WorkspaceObjectPermission{ + {PermissionLevel: "CAN_MANAGE"}, + }, + }, + }, + }, nil) + + b.SetWorkpaceClient(m.WorkspaceClient) + rb := bundle.ReadOnly(b) + + diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions()) + require.Len(t, diags, 1) + require.Equal(t, "untracked permissions apply to target workspace path", diags[0].Summary) + require.Equal(t, diag.Warning, diags[0].Severity) +} + +func TestValidateFolderPermissionsFailsOnNoRootFolder(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + RootPath: "/NotExisting", + ArtifactPath: "/NotExisting/artifacts", + FilePath: "/NotExisting/files", + StatePath: "/NotExisting/state", + ResourcePath: "/NotExisting/resources", + }, + Permissions: []resources.Permission{ + {Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"}, + }, + }, + } + m := mocks.NewMockWorkspaceClient(t) + api := m.GetMockWorkspaceAPI() + api.EXPECT().GetStatusByPath(mock.Anything, "/NotExisting").Return(nil, &apierr.APIError{ + StatusCode: 404, + ErrorCode: "RESOURCE_DOES_NOT_EXIST", + }) + api.EXPECT().GetStatusByPath(mock.Anything, "/").Return(nil, 
&apierr.APIError{ + StatusCode: 404, + ErrorCode: "RESOURCE_DOES_NOT_EXIST", + }) + + b.SetWorkpaceClient(m.WorkspaceClient) + rb := bundle.ReadOnly(b) + + diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions()) + require.Len(t, diags, 1) + require.Equal(t, "folder / and its parent folders do not exist", diags[0].Summary) + require.Equal(t, diag.Error, diags[0].Severity) +} diff --git a/bundle/config/validate/validate.go b/bundle/config/validate/validate.go index 79f42bd2..440477e6 100644 --- a/bundle/config/validate/validate.go +++ b/bundle/config/validate/validate.go @@ -35,6 +35,7 @@ func (v *validate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics FilesToSync(), ValidateSyncPatterns(), JobTaskClusterSpec(), + ValidateFolderPermissions(), )) } diff --git a/bundle/deploy/terraform/check_dashboards_modified_remotely.go b/bundle/deploy/terraform/check_dashboards_modified_remotely.go new file mode 100644 index 00000000..c884bcb9 --- /dev/null +++ b/bundle/deploy/terraform/check_dashboards_modified_remotely.go @@ -0,0 +1,117 @@ +package terraform + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + tfjson "github.com/hashicorp/terraform-json" +) + +type dashboardState struct { + Name string + ID string + ETag string +} + +func collectDashboardsFromState(ctx context.Context, b *bundle.Bundle) ([]dashboardState, error) { + state, err := ParseResourcesState(ctx, b) + if err != nil && state == nil { + return nil, err + } + + var dashboards []dashboardState + for _, resource := range state.Resources { + if resource.Mode != tfjson.ManagedResourceMode { + continue + } + for _, instance := range resource.Instances { + switch resource.Type { + case "databricks_dashboard": + dashboards = append(dashboards, dashboardState{ + Name: resource.Name, + ID: instance.Attributes.ID, + ETag: instance.Attributes.ETag, + }) + } + } + } + + return dashboards, nil +} + +type checkDashboardsModifiedRemotely struct { +} + +func (l *checkDashboardsModifiedRemotely) Name() string { + return "CheckDashboardsModifiedRemotely" +} + +func (l *checkDashboardsModifiedRemotely) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + // This mutator is relevant only if the bundle includes dashboards. + if len(b.Config.Resources.Dashboards) == 0 { + return nil + } + + // If the user has forced the deployment, skip this check. + if b.Config.Bundle.Force { + return nil + } + + dashboards, err := collectDashboardsFromState(ctx, b) + if err != nil { + return diag.FromErr(err) + } + + var diags diag.Diagnostics + for _, dashboard := range dashboards { + // Skip dashboards that are not defined in the bundle. + // These will be destroyed upon deployment. + if _, ok := b.Config.Resources.Dashboards[dashboard.Name]; !ok { + continue + } + + path := dyn.MustPathFromString(fmt.Sprintf("resources.dashboards.%s", dashboard.Name)) + loc := b.Config.GetLocation(path.String()) + actual, err := b.WorkspaceClient().Lakeview.GetByDashboardId(ctx, dashboard.ID) + if err != nil { + diags = diags.Append(diag.Diagnostic{ + Severity: diag.Error, + Summary: fmt.Sprintf("failed to get dashboard %q", dashboard.Name), + Detail: err.Error(), + Paths: []dyn.Path{path}, + Locations: []dyn.Location{loc}, + }) + continue + } + + // If the ETag is the same, the dashboard has not been modified. 
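+		// The ETag recorded in Terraform state (see stateInstanceAttributes in util.go)
+		// reflects the dashboard as of the last bundle deployment, so a mismatch implies
+		// an out-of-band modification.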
+ if actual.Etag == dashboard.ETag { + continue + } + + diags = diags.Append(diag.Diagnostic{ + Severity: diag.Error, + Summary: fmt.Sprintf("dashboard %q has been modified remotely", dashboard.Name), + Detail: "" + + "This dashboard has been modified remotely since the last bundle deployment.\n" + + "These modifications are untracked and will be overwritten on deploy.\n" + + "\n" + + "Make sure that the local dashboard definition matches what you intend to deploy\n" + + "before proceeding with the deployment.\n" + + "\n" + + "Run `databricks bundle deploy --force` to bypass this error." + + "", + Paths: []dyn.Path{path}, + Locations: []dyn.Location{loc}, + }) + } + + return diags +} + +func CheckDashboardsModifiedRemotely() *checkDashboardsModifiedRemotely { + return &checkDashboardsModifiedRemotely{} +} diff --git a/bundle/deploy/terraform/check_dashboards_modified_remotely_test.go b/bundle/deploy/terraform/check_dashboards_modified_remotely_test.go new file mode 100644 index 00000000..25aee125 --- /dev/null +++ b/bundle/deploy/terraform/check_dashboards_modified_remotely_test.go @@ -0,0 +1,191 @@ +package terraform + +import ( + "context" + "fmt" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/dashboards" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func mockDashboardBundle(t *testing.T) *bundle.Bundle { + dir := t.TempDir() + b := &bundle.Bundle{ + BundleRootPath: dir, + Config: config.Root{ + Bundle: config.Bundle{ + Target: "test", + }, + Resources: config.Resources{ + Dashboards: map[string]*resources.Dashboard{ + "dash1": { + Dashboard: &dashboards.Dashboard{ + DisplayName: "My Special Dashboard", + }, + }, + }, + }, + }, + } + return b +} + +func TestCheckDashboardsModifiedRemotely_NoDashboards(t *testing.T) { + dir := t.TempDir() + b := &bundle.Bundle{ + BundleRootPath: dir, + Config: config.Root{ + Bundle: config.Bundle{ + Target: "test", + }, + Resources: config.Resources{}, + }, + } + + diags := bundle.Apply(context.Background(), b, CheckDashboardsModifiedRemotely()) + assert.Empty(t, diags) +} + +func TestCheckDashboardsModifiedRemotely_FirstDeployment(t *testing.T) { + b := mockDashboardBundle(t) + diags := bundle.Apply(context.Background(), b, CheckDashboardsModifiedRemotely()) + assert.Empty(t, diags) +} + +func TestCheckDashboardsModifiedRemotely_ExistingStateNoChange(t *testing.T) { + ctx := context.Background() + + b := mockDashboardBundle(t) + writeFakeDashboardState(t, ctx, b) + + // Mock the call to the API. + m := mocks.NewMockWorkspaceClient(t) + dashboardsAPI := m.GetMockLakeviewAPI() + dashboardsAPI.EXPECT(). + GetByDashboardId(mock.Anything, "id1"). + Return(&dashboards.Dashboard{ + DisplayName: "My Special Dashboard", + Etag: "1000", + }, nil). + Once() + b.SetWorkpaceClient(m.WorkspaceClient) + + // No changes, so no diags. + diags := bundle.Apply(ctx, b, CheckDashboardsModifiedRemotely()) + assert.Empty(t, diags) +} + +func TestCheckDashboardsModifiedRemotely_ExistingStateChange(t *testing.T) { + ctx := context.Background() + + b := mockDashboardBundle(t) + writeFakeDashboardState(t, ctx, b) + + // Mock the call to the API. 
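+	// The fake state records ETag "1000" for "dash1"; returning "1234" here
+	// simulates a remote modification.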
+ m := mocks.NewMockWorkspaceClient(t) + dashboardsAPI := m.GetMockLakeviewAPI() + dashboardsAPI.EXPECT(). + GetByDashboardId(mock.Anything, "id1"). + Return(&dashboards.Dashboard{ + DisplayName: "My Special Dashboard", + Etag: "1234", + }, nil). + Once() + b.SetWorkpaceClient(m.WorkspaceClient) + + // The dashboard has changed, so expect an error. + diags := bundle.Apply(ctx, b, CheckDashboardsModifiedRemotely()) + if assert.Len(t, diags, 1) { + assert.Equal(t, diag.Error, diags[0].Severity) + assert.Equal(t, `dashboard "dash1" has been modified remotely`, diags[0].Summary) + } +} + +func TestCheckDashboardsModifiedRemotely_ExistingStateFailureToGet(t *testing.T) { + ctx := context.Background() + + b := mockDashboardBundle(t) + writeFakeDashboardState(t, ctx, b) + + // Mock the call to the API. + m := mocks.NewMockWorkspaceClient(t) + dashboardsAPI := m.GetMockLakeviewAPI() + dashboardsAPI.EXPECT(). + GetByDashboardId(mock.Anything, "id1"). + Return(nil, fmt.Errorf("failure")). + Once() + b.SetWorkpaceClient(m.WorkspaceClient) + + // Unable to get the dashboard, so expect an error. + diags := bundle.Apply(ctx, b, CheckDashboardsModifiedRemotely()) + if assert.Len(t, diags, 1) { + assert.Equal(t, diag.Error, diags[0].Severity) + assert.Equal(t, `failed to get dashboard "dash1"`, diags[0].Summary) + } +} + +func writeFakeDashboardState(t *testing.T, ctx context.Context, b *bundle.Bundle) { + tfDir, err := Dir(ctx, b) + require.NoError(t, err) + + // Write fake state file. + testutil.WriteFile(t, ` + { + "version": 4, + "terraform_version": "1.5.5", + "resources": [ + { + "mode": "managed", + "type": "databricks_dashboard", + "name": "dash1", + "instances": [ + { + "schema_version": 0, + "attributes": { + "etag": "1000", + "id": "id1" + } + } + ] + }, + { + "mode": "managed", + "type": "databricks_job", + "name": "job", + "instances": [ + { + "schema_version": 0, + "attributes": { + "id": "1234" + } + } + ] + }, + { + "mode": "managed", + "type": "databricks_dashboard", + "name": "dash2", + "instances": [ + { + "schema_version": 0, + "attributes": { + "etag": "1001", + "id": "id2" + } + } + ] + } + ] + } + `, filepath.Join(tfDir, TerraformStateFileName)) +} diff --git a/bundle/deploy/terraform/convert.go b/bundle/deploy/terraform/convert.go index 0ba8bb1f..0ace7c66 100644 --- a/bundle/deploy/terraform/convert.go +++ b/bundle/deploy/terraform/convert.go @@ -176,6 +176,16 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error { } cur.ID = instance.Attributes.ID config.Resources.Clusters[resource.Name] = cur + case "databricks_dashboard": + if config.Resources.Dashboards == nil { + config.Resources.Dashboards = make(map[string]*resources.Dashboard) + } + cur := config.Resources.Dashboards[resource.Name] + if cur == nil { + cur = &resources.Dashboard{ModifiedStatus: resources.ModifiedStatusDeleted} + } + cur.ID = instance.Attributes.ID + config.Resources.Dashboards[resource.Name] = cur case "databricks_permissions": case "databricks_grants": // Ignore; no need to pull these back into the configuration. 
@@ -230,6 +240,11 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error { src.ModifiedStatus = resources.ModifiedStatusCreated } } + for _, src := range config.Resources.Dashboards { + if src.ModifiedStatus == "" && src.ID == "" { + src.ModifiedStatus = resources.ModifiedStatusCreated + } + } return nil } diff --git a/bundle/deploy/terraform/convert_test.go b/bundle/deploy/terraform/convert_test.go index 575ff00b..6ed34d43 100644 --- a/bundle/deploy/terraform/convert_test.go +++ b/bundle/deploy/terraform/convert_test.go @@ -12,6 +12,7 @@ import ( "github.com/databricks/cli/libs/dyn/convert" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/dashboards" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/ml" "github.com/databricks/databricks-sdk-go/service/pipelines" @@ -677,6 +678,14 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) { {Attributes: stateInstanceAttributes{ID: "1"}}, }, }, + { + Type: "databricks_dashboard", + Mode: "managed", + Name: "test_dashboard", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, }, } err := TerraformToBundle(&tfState, &config) @@ -709,6 +718,9 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) { assert.Equal(t, "1", config.Resources.Clusters["test_cluster"].ID) assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Clusters["test_cluster"].ModifiedStatus) + assert.Equal(t, "1", config.Resources.Dashboards["test_dashboard"].ID) + assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Dashboards["test_dashboard"].ModifiedStatus) + AssertFullResourceCoverage(t, &config) } @@ -778,6 +790,13 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) { }, }, }, + Dashboards: map[string]*resources.Dashboard{ + "test_dashboard": { + Dashboard: &dashboards.Dashboard{ + DisplayName: "test_dashboard", + }, + }, + }, }, } var tfState = resourcesState{ @@ -813,6 +832,9 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) { assert.Equal(t, "", config.Resources.Clusters["test_cluster"].ID) assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Clusters["test_cluster"].ModifiedStatus) + assert.Equal(t, "", config.Resources.Dashboards["test_dashboard"].ID) + assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Dashboards["test_dashboard"].ModifiedStatus) + AssertFullResourceCoverage(t, &config) } @@ -927,6 +949,18 @@ func TestTerraformToBundleModifiedResources(t *testing.T) { }, }, }, + Dashboards: map[string]*resources.Dashboard{ + "test_dashboard": { + Dashboard: &dashboards.Dashboard{ + DisplayName: "test_dashboard", + }, + }, + "test_dashboard_new": { + Dashboard: &dashboards.Dashboard{ + DisplayName: "test_dashboard_new", + }, + }, + }, }, } var tfState = resourcesState{ @@ -1075,6 +1109,22 @@ func TestTerraformToBundleModifiedResources(t *testing.T) { {Attributes: stateInstanceAttributes{ID: "2"}}, }, }, + { + Type: "databricks_dashboard", + Mode: "managed", + Name: "test_dashboard", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_dashboard", + Mode: "managed", + Name: "test_dashboard_old", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "2"}}, + }, + }, }, } err := TerraformToBundle(&tfState, &config) @@ -1143,6 +1193,13 @@ func 
TestTerraformToBundleModifiedResources(t *testing.T) { assert.Equal(t, "", config.Resources.Clusters["test_cluster_new"].ID) assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Clusters["test_cluster_new"].ModifiedStatus) + assert.Equal(t, "1", config.Resources.Dashboards["test_dashboard"].ID) + assert.Equal(t, "", config.Resources.Dashboards["test_dashboard"].ModifiedStatus) + assert.Equal(t, "2", config.Resources.Dashboards["test_dashboard_old"].ID) + assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Dashboards["test_dashboard_old"].ModifiedStatus) + assert.Equal(t, "", config.Resources.Dashboards["test_dashboard_new"].ID) + assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Dashboards["test_dashboard_new"].ModifiedStatus) + AssertFullResourceCoverage(t, &config) } diff --git a/bundle/deploy/terraform/interpolate.go b/bundle/deploy/terraform/interpolate.go index 12894c68..eb15c63e 100644 --- a/bundle/deploy/terraform/interpolate.go +++ b/bundle/deploy/terraform/interpolate.go @@ -60,6 +60,8 @@ func (m *interpolateMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.D path = dyn.NewPath(dyn.Key("databricks_schema")).Append(path[2:]...) case dyn.Key("clusters"): path = dyn.NewPath(dyn.Key("databricks_cluster")).Append(path[2:]...) + case dyn.Key("dashboards"): + path = dyn.NewPath(dyn.Key("databricks_dashboard")).Append(path[2:]...) default: // Trigger "key not found" for unknown resource types. return dyn.GetByPath(root, path) diff --git a/bundle/deploy/terraform/interpolate_test.go b/bundle/deploy/terraform/interpolate_test.go index 630a904a..b26ef928 100644 --- a/bundle/deploy/terraform/interpolate_test.go +++ b/bundle/deploy/terraform/interpolate_test.go @@ -32,6 +32,7 @@ func TestInterpolate(t *testing.T) { "other_registered_model": "${resources.registered_models.other_registered_model.id}", "other_schema": "${resources.schemas.other_schema.id}", "other_cluster": "${resources.clusters.other_cluster.id}", + "other_dashboard": "${resources.dashboards.other_dashboard.id}", }, Tasks: []jobs.Task{ { @@ -69,6 +70,7 @@ func TestInterpolate(t *testing.T) { assert.Equal(t, "${databricks_registered_model.other_registered_model.id}", j.Tags["other_registered_model"]) assert.Equal(t, "${databricks_schema.other_schema.id}", j.Tags["other_schema"]) assert.Equal(t, "${databricks_cluster.other_cluster.id}", j.Tags["other_cluster"]) + assert.Equal(t, "${databricks_dashboard.other_dashboard.id}", j.Tags["other_dashboard"]) m := b.Config.Resources.Models["my_model"] assert.Equal(t, "my_model", m.Model.Name) diff --git a/bundle/deploy/terraform/tfdyn/convert_dashboard.go b/bundle/deploy/terraform/tfdyn/convert_dashboard.go new file mode 100644 index 00000000..3ba7e19a --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_dashboard.go @@ -0,0 +1,109 @@ +package tfdyn + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/log" +) + +const ( + filePathFieldName = "file_path" + serializedDashboardFieldName = "serialized_dashboard" +) + +// Marshal "serialized_dashboard" as JSON if it is set in the input but not in the output. +func marshalSerializedDashboard(vin dyn.Value, vout dyn.Value) (dyn.Value, error) { + // Skip if the "serialized_dashboard" field is already set. 
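+	// This is the case when the input value was a plain string, which normalization
+	// carried over to the output verbatim.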
+ if v := vout.Get(serializedDashboardFieldName); v.IsValid() { + return vout, nil + } + + // Skip if the "serialized_dashboard" field on the input is not set. + v := vin.Get(serializedDashboardFieldName) + if !v.IsValid() { + return vout, nil + } + + // Marshal the "serialized_dashboard" field as JSON. + data, err := json.Marshal(v.AsAny()) + if err != nil { + return dyn.InvalidValue, fmt.Errorf("failed to marshal serialized_dashboard: %w", err) + } + + // Set the "serialized_dashboard" field on the output. + return dyn.Set(vout, serializedDashboardFieldName, dyn.V(string(data))) +} + +func convertDashboardResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { + var err error + + // Normalize the output value to the target schema. + vout, diags := convert.Normalize(schema.ResourceDashboard{}, vin) + for _, diag := range diags { + log.Debugf(ctx, "dashboard normalization diagnostic: %s", diag.Summary) + } + + // Include "serialized_dashboard" field if "file_path" is set. + // Note: the Terraform resource supports "file_path" natively, but its + // change detection mechanism doesn't work as expected at the time of writing (Sep 30). + if path, ok := vout.Get(filePathFieldName).AsString(); ok { + vout, err = dyn.Set(vout, serializedDashboardFieldName, dyn.V(fmt.Sprintf("${file(%q)}", path))) + if err != nil { + return dyn.InvalidValue, fmt.Errorf("failed to set serialized_dashboard: %w", err) + } + // Drop the "file_path" field. It is mutually exclusive with "serialized_dashboard". + vout, err = dyn.Walk(vout, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + switch len(p) { + case 0: + return v, nil + case 1: + if p[0] == dyn.Key(filePathFieldName) { + return v, dyn.ErrDrop + } + } + + // Skip everything else. + return v, dyn.ErrSkip + }) + if err != nil { + return dyn.InvalidValue, fmt.Errorf("failed to drop file_path: %w", err) + } + } + + // Marshal "serialized_dashboard" as JSON if it is set in the input but not in the output. + vout, err = marshalSerializedDashboard(vin, vout) + if err != nil { + return dyn.InvalidValue, err + } + + return vout, nil +} + +type dashboardConverter struct{} + +func (dashboardConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error { + vout, err := convertDashboardResource(ctx, vin) + if err != nil { + return err + } + + // Add the converted resource to the output. + out.Dashboard[key] = vout.AsAny() + + // Configure permissions for this resource. 
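+	// The dashboard ID is only known after apply, so it is referenced through a
+	// Terraform interpolation on the dashboard resource.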
+	if permissions := convertPermissionsResource(ctx, vin); permissions != nil {
+		permissions.DashboardId = fmt.Sprintf("${databricks_dashboard.%s.id}", key)
+		out.Permissions["dashboard_"+key] = permissions
+	}
+
+	return nil
+}
+
+func init() {
+	registerConverter("dashboards", dashboardConverter{})
+}
diff --git a/bundle/deploy/terraform/tfdyn/convert_dashboard_test.go b/bundle/deploy/terraform/tfdyn/convert_dashboard_test.go
new file mode 100644
index 00000000..539ba21a
--- /dev/null
+++ b/bundle/deploy/terraform/tfdyn/convert_dashboard_test.go
@@ -0,0 +1,153 @@
+package tfdyn
+
+import (
+	"context"
+	"testing"
+
+	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/cli/bundle/internal/tf/schema"
+	"github.com/databricks/cli/libs/dyn"
+	"github.com/databricks/cli/libs/dyn/convert"
+	"github.com/databricks/databricks-sdk-go/service/dashboards"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestConvertDashboard(t *testing.T) {
+	var src = resources.Dashboard{
+		Dashboard: &dashboards.Dashboard{
+			DisplayName: "my dashboard",
+			WarehouseId: "f00dcafe",
+			ParentPath: "/some/path",
+		},
+
+		EmbedCredentials: true,
+
+		Permissions: []resources.Permission{
+			{
+				Level: "CAN_VIEW",
+				UserName: "jane@doe.com",
+			},
+		},
+	}
+
+	vin, err := convert.FromTyped(src, dyn.NilValue)
+	require.NoError(t, err)
+
+	ctx := context.Background()
+	out := schema.NewResources()
+	err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
+	require.NoError(t, err)
+
+	// Assert equality on the dashboard
+	assert.Equal(t, map[string]any{
+		"display_name": "my dashboard",
+		"warehouse_id": "f00dcafe",
+		"parent_path": "/some/path",
+		"embed_credentials": true,
+	}, out.Dashboard["my_dashboard"])
+
+	// Assert equality on the permissions
+	assert.Equal(t, &schema.ResourcePermissions{
+		DashboardId: "${databricks_dashboard.my_dashboard.id}",
+		AccessControl: []schema.ResourcePermissionsAccessControl{
+			{
+				PermissionLevel: "CAN_VIEW",
+				UserName: "jane@doe.com",
+			},
+		},
+	}, out.Permissions["dashboard_my_dashboard"])
+}
+
+func TestConvertDashboardFilePath(t *testing.T) {
+	var src = resources.Dashboard{
+		FilePath: "some/path",
+	}
+
+	vin, err := convert.FromTyped(src, dyn.NilValue)
+	require.NoError(t, err)
+
+	ctx := context.Background()
+	out := schema.NewResources()
+	err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
+	require.NoError(t, err)
+
+	// Assert that the "serialized_dashboard" is included.
+	assert.Subset(t, out.Dashboard["my_dashboard"], map[string]any{
+		"serialized_dashboard": "${file(\"some/path\")}",
+	})
+
+	// Assert that the "file_path" doesn't carry over.
+	assert.NotSubset(t, out.Dashboard["my_dashboard"], map[string]any{
+		"file_path": "some/path",
+	})
+}
+
+func TestConvertDashboardFilePathQuoted(t *testing.T) {
+	var src = resources.Dashboard{
+		FilePath: `C:\foo\bar\baz\dashboard.lvdash.json`,
+	}
+
+	vin, err := convert.FromTyped(src, dyn.NilValue)
+	require.NoError(t, err)
+
+	ctx := context.Background()
+	out := schema.NewResources()
+	err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
+	require.NoError(t, err)
+
+	// Assert that the "serialized_dashboard" is included.
+	assert.Subset(t, out.Dashboard["my_dashboard"], map[string]any{
+		"serialized_dashboard": `${file("C:\\foo\\bar\\baz\\dashboard.lvdash.json")}`,
+	})
+
+	// Assert that the "file_path" doesn't carry over.
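+	// ("file_path" is mutually exclusive with "serialized_dashboard" and is dropped
+	// by convertDashboardResource.)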
+	assert.NotSubset(t, out.Dashboard["my_dashboard"], map[string]any{
+		"file_path": `C:\foo\bar\baz\dashboard.lvdash.json`,
+	})
+}
+
+func TestConvertDashboardSerializedDashboardString(t *testing.T) {
+	var src = resources.Dashboard{
+		SerializedDashboard: `{ "json": true }`,
+	}
+
+	vin, err := convert.FromTyped(src, dyn.NilValue)
+	require.NoError(t, err)
+
+	ctx := context.Background()
+	out := schema.NewResources()
+	err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
+	require.NoError(t, err)
+
+	// Assert that the "serialized_dashboard" is included.
+	assert.Subset(t, out.Dashboard["my_dashboard"], map[string]any{
+		"serialized_dashboard": `{ "json": true }`,
+	})
+}
+
+func TestConvertDashboardSerializedDashboardAny(t *testing.T) {
+	var src = resources.Dashboard{
+		SerializedDashboard: map[string]any{
+			"pages": []map[string]any{
+				{
+					"displayName": "New Page",
+					"layout": []map[string]any{},
+				},
+			},
+		},
+	}
+
+	vin, err := convert.FromTyped(src, dyn.NilValue)
+	require.NoError(t, err)
+
+	ctx := context.Background()
+	out := schema.NewResources()
+	err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
+	require.NoError(t, err)
+
+	// Assert that the "serialized_dashboard" is included.
+	assert.Subset(t, out.Dashboard["my_dashboard"], map[string]any{
+		"serialized_dashboard": `{"pages":[{"displayName":"New Page","layout":[]}]}`,
+	})
+}
diff --git a/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go b/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go
index 50bfce7a..f71abf43 100644
--- a/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go
+++ b/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go
@@ -15,8 +15,8 @@ import (
 
 func TestConvertQualityMonitor(t *testing.T) {
 	var src = resources.QualityMonitor{
+		TableName: "test_table_name",
 		CreateMonitor: &catalog.CreateMonitor{
-			TableName: "test_table_name",
 			AssetsDir: "assets_dir",
 			OutputSchemaName: "output_schema_name",
 			InferenceLog: &catalog.MonitorInferenceLog{
diff --git a/bundle/deploy/terraform/util.go b/bundle/deploy/terraform/util.go
index 64d667b5..4da015c2 100644
--- a/bundle/deploy/terraform/util.go
+++ b/bundle/deploy/terraform/util.go
@@ -13,7 +13,7 @@ import (
 
 // Partial representation of the Terraform state file format.
 // We are only interested in the global version and serial numbers,
-// plus resource types, names, modes, and ids.
+// plus resource types, names, modes, IDs, and ETags (for dashboards).
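+//
+// An illustrative subset (mirroring the fake state used in the tests):
+//
+//	{"version": 4, "resources": [{"mode": "managed", "type": "databricks_dashboard",
+//	  "name": "dash1", "instances": [{"attributes": {"id": "id1", "etag": "1000"}}]}]}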
type resourcesState struct { Version int `json:"version"` Resources []stateResource `json:"resources"` @@ -33,7 +33,8 @@ type stateResourceInstance struct { } type stateInstanceAttributes struct { - ID string `json:"id"` + ID string `json:"id"` + ETag string `json:"etag,omitempty"` } func ParseResourcesState(ctx context.Context, b *bundle.Bundle) (*resourcesState, error) { diff --git a/bundle/internal/bundletest/mutate.go b/bundle/internal/bundletest/mutate.go new file mode 100644 index 00000000..c0ac630c --- /dev/null +++ b/bundle/internal/bundletest/mutate.go @@ -0,0 +1,20 @@ +package bundletest + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/stretchr/testify/require" +) + +func Mutate(t *testing.T, b *bundle.Bundle, f func(v dyn.Value) (dyn.Value, error)) { + diags := bundle.ApplyFunc(context.Background(), b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(f) + require.NoError(t, err) + return nil + }) + require.NoError(t, diags.Error()) +} diff --git a/bundle/internal/schema/testdata/pass/quality_monitor.yml b/bundle/internal/schema/testdata/pass/quality_monitor.yml index a9be5932..79c4dd69 100644 --- a/bundle/internal/schema/testdata/pass/quality_monitor.yml +++ b/bundle/internal/schema/testdata/pass/quality_monitor.yml @@ -4,6 +4,7 @@ bundle: resources: quality_monitors: myqualitymonitor: + table_name: catalog.schema.quality_monitor inference_log: granularities: - a diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index 49e48a6e..cfbc46c0 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.53.0" +const ProviderVersion = "1.58.0" diff --git a/bundle/internal/tf/schema/data_source_functions.go b/bundle/internal/tf/schema/data_source_functions.go new file mode 100644 index 00000000..6085d752 --- /dev/null +++ b/bundle/internal/tf/schema/data_source_functions.go @@ -0,0 +1,98 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type DataSourceFunctionsFunctionsInputParamsParameters struct { + Comment string `json:"comment,omitempty"` + Name string `json:"name"` + ParameterDefault string `json:"parameter_default,omitempty"` + ParameterMode string `json:"parameter_mode,omitempty"` + ParameterType string `json:"parameter_type,omitempty"` + Position int `json:"position"` + TypeIntervalType string `json:"type_interval_type,omitempty"` + TypeJson string `json:"type_json,omitempty"` + TypeName string `json:"type_name"` + TypePrecision int `json:"type_precision,omitempty"` + TypeScale int `json:"type_scale,omitempty"` + TypeText string `json:"type_text"` +} + +type DataSourceFunctionsFunctionsInputParams struct { + Parameters []DataSourceFunctionsFunctionsInputParamsParameters `json:"parameters,omitempty"` +} + +type DataSourceFunctionsFunctionsReturnParamsParameters struct { + Comment string `json:"comment,omitempty"` + Name string `json:"name"` + ParameterDefault string `json:"parameter_default,omitempty"` + ParameterMode string `json:"parameter_mode,omitempty"` + ParameterType string `json:"parameter_type,omitempty"` + Position int `json:"position"` + TypeIntervalType string `json:"type_interval_type,omitempty"` + TypeJson string `json:"type_json,omitempty"` + TypeName string `json:"type_name"` + TypePrecision int `json:"type_precision,omitempty"` + TypeScale int `json:"type_scale,omitempty"` + TypeText string `json:"type_text"` +} + +type DataSourceFunctionsFunctionsReturnParams struct { + Parameters []DataSourceFunctionsFunctionsReturnParamsParameters `json:"parameters,omitempty"` +} + +type DataSourceFunctionsFunctionsRoutineDependenciesDependenciesFunction struct { + FunctionFullName string `json:"function_full_name"` +} + +type DataSourceFunctionsFunctionsRoutineDependenciesDependenciesTable struct { + TableFullName string `json:"table_full_name"` +} + +type DataSourceFunctionsFunctionsRoutineDependenciesDependencies struct { + Function []DataSourceFunctionsFunctionsRoutineDependenciesDependenciesFunction `json:"function,omitempty"` + Table []DataSourceFunctionsFunctionsRoutineDependenciesDependenciesTable `json:"table,omitempty"` +} + +type DataSourceFunctionsFunctionsRoutineDependencies struct { + Dependencies []DataSourceFunctionsFunctionsRoutineDependenciesDependencies `json:"dependencies,omitempty"` +} + +type DataSourceFunctionsFunctions struct { + BrowseOnly bool `json:"browse_only,omitempty"` + CatalogName string `json:"catalog_name,omitempty"` + Comment string `json:"comment,omitempty"` + CreatedAt int `json:"created_at,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + DataType string `json:"data_type,omitempty"` + ExternalLanguage string `json:"external_language,omitempty"` + ExternalName string `json:"external_name,omitempty"` + FullDataType string `json:"full_data_type,omitempty"` + FullName string `json:"full_name,omitempty"` + FunctionId string `json:"function_id,omitempty"` + IsDeterministic bool `json:"is_deterministic,omitempty"` + IsNullCall bool `json:"is_null_call,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name,omitempty"` + Owner string `json:"owner,omitempty"` + ParameterStyle string `json:"parameter_style,omitempty"` + Properties string `json:"properties,omitempty"` + RoutineBody string `json:"routine_body,omitempty"` + RoutineDefinition string `json:"routine_definition,omitempty"` + SchemaName string `json:"schema_name,omitempty"` + SecurityType string `json:"security_type,omitempty"` + SpecificName string 
`json:"specific_name,omitempty"` + SqlDataAccess string `json:"sql_data_access,omitempty"` + SqlPath string `json:"sql_path,omitempty"` + UpdatedAt int `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` + InputParams []DataSourceFunctionsFunctionsInputParams `json:"input_params,omitempty"` + ReturnParams []DataSourceFunctionsFunctionsReturnParams `json:"return_params,omitempty"` + RoutineDependencies []DataSourceFunctionsFunctionsRoutineDependencies `json:"routine_dependencies,omitempty"` +} + +type DataSourceFunctions struct { + CatalogName string `json:"catalog_name"` + IncludeBrowse bool `json:"include_browse,omitempty"` + SchemaName string `json:"schema_name"` + Functions []DataSourceFunctionsFunctions `json:"functions,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_notification_destinations.go b/bundle/internal/tf/schema/data_source_notification_destinations.go new file mode 100644 index 00000000..c95ad6db --- /dev/null +++ b/bundle/internal/tf/schema/data_source_notification_destinations.go @@ -0,0 +1,15 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type DataSourceNotificationDestinationsNotificationDestinations struct { + DestinationType string `json:"destination_type,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Id string `json:"id,omitempty"` +} + +type DataSourceNotificationDestinations struct { + DisplayNameContains string `json:"display_name_contains,omitempty"` + Type string `json:"type,omitempty"` + NotificationDestinations []DataSourceNotificationDestinationsNotificationDestinations `json:"notification_destinations,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_registered_model.go b/bundle/internal/tf/schema/data_source_registered_model.go new file mode 100644 index 00000000..e19e0849 --- /dev/null +++ b/bundle/internal/tf/schema/data_source_registered_model.go @@ -0,0 +1,32 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type DataSourceRegisteredModelModelInfoAliases struct { + AliasName string `json:"alias_name,omitempty"` + VersionNum int `json:"version_num,omitempty"` +} + +type DataSourceRegisteredModelModelInfo struct { + BrowseOnly bool `json:"browse_only,omitempty"` + CatalogName string `json:"catalog_name,omitempty"` + Comment string `json:"comment,omitempty"` + CreatedAt int `json:"created_at,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + FullName string `json:"full_name,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name,omitempty"` + Owner string `json:"owner,omitempty"` + SchemaName string `json:"schema_name,omitempty"` + StorageLocation string `json:"storage_location,omitempty"` + UpdatedAt int `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` + Aliases []DataSourceRegisteredModelModelInfoAliases `json:"aliases,omitempty"` +} + +type DataSourceRegisteredModel struct { + FullName string `json:"full_name"` + IncludeAliases bool `json:"include_aliases,omitempty"` + IncludeBrowse bool `json:"include_browse,omitempty"` + ModelInfo []DataSourceRegisteredModelModelInfo `json:"model_info,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_storage_credential.go b/bundle/internal/tf/schema/data_source_storage_credential.go index bf58f272..95c1afcd 100644 --- a/bundle/internal/tf/schema/data_source_storage_credential.go +++ b/bundle/internal/tf/schema/data_source_storage_credential.go @@ -35,6 +35,7 @@ type DataSourceStorageCredentialStorageCredentialInfo struct { Comment string `json:"comment,omitempty"` CreatedAt int `json:"created_at,omitempty"` CreatedBy string `json:"created_by,omitempty"` + FullName string `json:"full_name,omitempty"` Id string `json:"id,omitempty"` IsolationMode string `json:"isolation_mode,omitempty"` MetastoreId string `json:"metastore_id,omitempty"` diff --git a/bundle/internal/tf/schema/data_source_volumes.go b/bundle/internal/tf/schema/data_source_volumes.go index 07bf5933..cafc9e68 100644 --- a/bundle/internal/tf/schema/data_source_volumes.go +++ b/bundle/internal/tf/schema/data_source_volumes.go @@ -4,7 +4,6 @@ package schema type DataSourceVolumes struct { CatalogName string `json:"catalog_name"` - Id string `json:"id,omitempty"` Ids []string `json:"ids,omitempty"` SchemaName string `json:"schema_name"` } diff --git a/bundle/internal/tf/schema/data_sources.go b/bundle/internal/tf/schema/data_sources.go index 10829b99..e32609b0 100644 --- a/bundle/internal/tf/schema/data_sources.go +++ b/bundle/internal/tf/schema/data_sources.go @@ -21,6 +21,7 @@ type DataSources struct { Directory map[string]any `json:"databricks_directory,omitempty"` ExternalLocation map[string]any `json:"databricks_external_location,omitempty"` ExternalLocations map[string]any `json:"databricks_external_locations,omitempty"` + Functions map[string]any `json:"databricks_functions,omitempty"` Group map[string]any `json:"databricks_group,omitempty"` InstancePool map[string]any `json:"databricks_instance_pool,omitempty"` InstanceProfiles map[string]any `json:"databricks_instance_profiles,omitempty"` @@ -36,7 +37,9 @@ type DataSources struct { NodeType map[string]any `json:"databricks_node_type,omitempty"` Notebook map[string]any `json:"databricks_notebook,omitempty"` NotebookPaths map[string]any `json:"databricks_notebook_paths,omitempty"` + NotificationDestinations map[string]any `json:"databricks_notification_destinations,omitempty"` Pipelines map[string]any 
`json:"databricks_pipelines,omitempty"` + RegisteredModel map[string]any `json:"databricks_registered_model,omitempty"` Schema map[string]any `json:"databricks_schema,omitempty"` Schemas map[string]any `json:"databricks_schemas,omitempty"` ServicePrincipal map[string]any `json:"databricks_service_principal,omitempty"` @@ -77,6 +80,7 @@ func NewDataSources() *DataSources { Directory: make(map[string]any), ExternalLocation: make(map[string]any), ExternalLocations: make(map[string]any), + Functions: make(map[string]any), Group: make(map[string]any), InstancePool: make(map[string]any), InstanceProfiles: make(map[string]any), @@ -92,7 +96,9 @@ func NewDataSources() *DataSources { NodeType: make(map[string]any), Notebook: make(map[string]any), NotebookPaths: make(map[string]any), + NotificationDestinations: make(map[string]any), Pipelines: make(map[string]any), + RegisteredModel: make(map[string]any), Schema: make(map[string]any), Schemas: make(map[string]any), ServicePrincipal: make(map[string]any), diff --git a/bundle/internal/tf/schema/resource_alert.go b/bundle/internal/tf/schema/resource_alert.go new file mode 100644 index 00000000..c539d5fe --- /dev/null +++ b/bundle/internal/tf/schema/resource_alert.go @@ -0,0 +1,46 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceAlertConditionOperandColumn struct { + Name string `json:"name"` +} + +type ResourceAlertConditionOperand struct { + Column *ResourceAlertConditionOperandColumn `json:"column,omitempty"` +} + +type ResourceAlertConditionThresholdValue struct { + BoolValue bool `json:"bool_value,omitempty"` + DoubleValue int `json:"double_value,omitempty"` + StringValue string `json:"string_value,omitempty"` +} + +type ResourceAlertConditionThreshold struct { + Value *ResourceAlertConditionThresholdValue `json:"value,omitempty"` +} + +type ResourceAlertCondition struct { + EmptyResultState string `json:"empty_result_state,omitempty"` + Op string `json:"op"` + Operand *ResourceAlertConditionOperand `json:"operand,omitempty"` + Threshold *ResourceAlertConditionThreshold `json:"threshold,omitempty"` +} + +type ResourceAlert struct { + CreateTime string `json:"create_time,omitempty"` + CustomBody string `json:"custom_body,omitempty"` + CustomSubject string `json:"custom_subject,omitempty"` + DisplayName string `json:"display_name"` + Id string `json:"id,omitempty"` + LifecycleState string `json:"lifecycle_state,omitempty"` + NotifyOnOk bool `json:"notify_on_ok,omitempty"` + OwnerUserName string `json:"owner_user_name,omitempty"` + ParentPath string `json:"parent_path,omitempty"` + QueryId string `json:"query_id"` + SecondsToRetrigger int `json:"seconds_to_retrigger,omitempty"` + State string `json:"state,omitempty"` + TriggerTime string `json:"trigger_time,omitempty"` + UpdateTime string `json:"update_time,omitempty"` + Condition *ResourceAlertCondition `json:"condition,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_custom_app_integration.go b/bundle/internal/tf/schema/resource_custom_app_integration.go new file mode 100644 index 00000000..e89eb7fe --- /dev/null +++ b/bundle/internal/tf/schema/resource_custom_app_integration.go @@ -0,0 +1,23 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type ResourceCustomAppIntegrationTokenAccessPolicy struct { + AccessTokenTtlInMinutes int `json:"access_token_ttl_in_minutes,omitempty"` + RefreshTokenTtlInMinutes int `json:"refresh_token_ttl_in_minutes,omitempty"` +} + +type ResourceCustomAppIntegration struct { + ClientId string `json:"client_id,omitempty"` + ClientSecret string `json:"client_secret,omitempty"` + Confidential bool `json:"confidential,omitempty"` + CreateTime string `json:"create_time,omitempty"` + CreatedBy int `json:"created_by,omitempty"` + CreatorUsername string `json:"creator_username,omitempty"` + Id string `json:"id,omitempty"` + IntegrationId string `json:"integration_id,omitempty"` + Name string `json:"name,omitempty"` + RedirectUrls []string `json:"redirect_urls,omitempty"` + Scopes []string `json:"scopes,omitempty"` + TokenAccessPolicy *ResourceCustomAppIntegrationTokenAccessPolicy `json:"token_access_policy,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_job.go b/bundle/internal/tf/schema/resource_job.go index 42b648b0..c89eafab 100644 --- a/bundle/internal/tf/schema/resource_job.go +++ b/bundle/internal/tf/schema/resource_job.go @@ -1448,6 +1448,7 @@ type ResourceJobWebhookNotifications struct { type ResourceJob struct { AlwaysRunning bool `json:"always_running,omitempty"` + BudgetPolicyId string `json:"budget_policy_id,omitempty"` ControlRunState bool `json:"control_run_state,omitempty"` Description string `json:"description,omitempty"` EditMode string `json:"edit_mode,omitempty"` diff --git a/bundle/internal/tf/schema/resource_library.go b/bundle/internal/tf/schema/resource_library.go index 385d992d..4fad7dbd 100644 --- a/bundle/internal/tf/schema/resource_library.go +++ b/bundle/internal/tf/schema/resource_library.go @@ -19,13 +19,13 @@ type ResourceLibraryPypi struct { } type ResourceLibrary struct { - ClusterId string `json:"cluster_id"` - Egg string `json:"egg,omitempty"` - Id string `json:"id,omitempty"` - Jar string `json:"jar,omitempty"` - Requirements string `json:"requirements,omitempty"` - Whl string `json:"whl,omitempty"` - Cran *ResourceLibraryCran `json:"cran,omitempty"` - Maven *ResourceLibraryMaven `json:"maven,omitempty"` - Pypi *ResourceLibraryPypi `json:"pypi,omitempty"` + ClusterId string `json:"cluster_id"` + Egg string `json:"egg,omitempty"` + Id string `json:"id,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran []ResourceLibraryCran `json:"cran,omitempty"` + Maven []ResourceLibraryMaven `json:"maven,omitempty"` + Pypi []ResourceLibraryPypi `json:"pypi,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_online_table.go b/bundle/internal/tf/schema/resource_online_table.go index de671ead..58d6f4ba 100644 --- a/bundle/internal/tf/schema/resource_online_table.go +++ b/bundle/internal/tf/schema/resource_online_table.go @@ -19,9 +19,10 @@ type ResourceOnlineTableSpec struct { } type ResourceOnlineTable struct { - Id string `json:"id,omitempty"` - Name string `json:"name"` - Status []any `json:"status,omitempty"` - TableServingUrl string `json:"table_serving_url,omitempty"` - Spec *ResourceOnlineTableSpec `json:"spec,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name"` + Status []any `json:"status,omitempty"` + TableServingUrl string `json:"table_serving_url,omitempty"` + UnityCatalogProvisioningState string `json:"unity_catalog_provisioning_state,omitempty"` + Spec *ResourceOnlineTableSpec `json:"spec,omitempty"` } diff --git 
a/bundle/internal/tf/schema/resource_pipeline.go b/bundle/internal/tf/schema/resource_pipeline.go index 1bed91fc..7238d24a 100644 --- a/bundle/internal/tf/schema/resource_pipeline.go +++ b/bundle/internal/tf/schema/resource_pipeline.go @@ -137,15 +137,32 @@ type ResourcePipelineFilters struct { type ResourcePipelineGatewayDefinition struct { ConnectionId string `json:"connection_id,omitempty"` + ConnectionName string `json:"connection_name,omitempty"` GatewayStorageCatalog string `json:"gateway_storage_catalog,omitempty"` GatewayStorageName string `json:"gateway_storage_name,omitempty"` GatewayStorageSchema string `json:"gateway_storage_schema,omitempty"` } +type ResourcePipelineIngestionDefinitionObjectsReportTableConfiguration struct { + PrimaryKeys []string `json:"primary_keys,omitempty"` + SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"` + ScdType string `json:"scd_type,omitempty"` + SequenceBy []string `json:"sequence_by,omitempty"` +} + +type ResourcePipelineIngestionDefinitionObjectsReport struct { + DestinationCatalog string `json:"destination_catalog,omitempty"` + DestinationSchema string `json:"destination_schema,omitempty"` + DestinationTable string `json:"destination_table,omitempty"` + SourceUrl string `json:"source_url,omitempty"` + TableConfiguration *ResourcePipelineIngestionDefinitionObjectsReportTableConfiguration `json:"table_configuration,omitempty"` +} + type ResourcePipelineIngestionDefinitionObjectsSchemaTableConfiguration struct { PrimaryKeys []string `json:"primary_keys,omitempty"` SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"` ScdType string `json:"scd_type,omitempty"` + SequenceBy []string `json:"sequence_by,omitempty"` } type ResourcePipelineIngestionDefinitionObjectsSchema struct { @@ -160,6 +177,7 @@ type ResourcePipelineIngestionDefinitionObjectsTableTableConfiguration struct { PrimaryKeys []string `json:"primary_keys,omitempty"` SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"` ScdType string `json:"scd_type,omitempty"` + SequenceBy []string `json:"sequence_by,omitempty"` } type ResourcePipelineIngestionDefinitionObjectsTable struct { @@ -173,6 +191,7 @@ type ResourcePipelineIngestionDefinitionObjectsTable struct { } type ResourcePipelineIngestionDefinitionObjects struct { + Report *ResourcePipelineIngestionDefinitionObjectsReport `json:"report,omitempty"` Schema *ResourcePipelineIngestionDefinitionObjectsSchema `json:"schema,omitempty"` Table *ResourcePipelineIngestionDefinitionObjectsTable `json:"table,omitempty"` } @@ -181,6 +200,7 @@ type ResourcePipelineIngestionDefinitionTableConfiguration struct { PrimaryKeys []string `json:"primary_keys,omitempty"` SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"` ScdType string `json:"scd_type,omitempty"` + SequenceBy []string `json:"sequence_by,omitempty"` } type ResourcePipelineIngestionDefinition struct { @@ -223,6 +243,12 @@ type ResourcePipelineNotification struct { EmailRecipients []string `json:"email_recipients,omitempty"` } +type ResourcePipelineRestartWindow struct { + DaysOfWeek string `json:"days_of_week,omitempty"` + StartHour int `json:"start_hour"` + TimeZoneId string `json:"time_zone_id,omitempty"` +} + type ResourcePipelineTriggerCron struct { QuartzCronSchedule string `json:"quartz_cron_schedule,omitempty"` TimezoneId string `json:"timezone_id,omitempty"` @@ -269,5 +295,6 @@ type ResourcePipeline struct { LatestUpdates 
[]ResourcePipelineLatestUpdates `json:"latest_updates,omitempty"` Library []ResourcePipelineLibrary `json:"library,omitempty"` Notification []ResourcePipelineNotification `json:"notification,omitempty"` + RestartWindow *ResourcePipelineRestartWindow `json:"restart_window,omitempty"` Trigger *ResourcePipelineTrigger `json:"trigger,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_query.go b/bundle/internal/tf/schema/resource_query.go new file mode 100644 index 00000000..dc8e517c --- /dev/null +++ b/bundle/internal/tf/schema/resource_query.go @@ -0,0 +1,84 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceQueryParameterDateRangeValueDateRangeValue struct { + End string `json:"end"` + Start string `json:"start"` +} + +type ResourceQueryParameterDateRangeValue struct { + DynamicDateRangeValue string `json:"dynamic_date_range_value,omitempty"` + Precision string `json:"precision,omitempty"` + StartDayOfWeek int `json:"start_day_of_week,omitempty"` + DateRangeValue *ResourceQueryParameterDateRangeValueDateRangeValue `json:"date_range_value,omitempty"` +} + +type ResourceQueryParameterDateValue struct { + DateValue string `json:"date_value,omitempty"` + DynamicDateValue string `json:"dynamic_date_value,omitempty"` + Precision string `json:"precision,omitempty"` +} + +type ResourceQueryParameterEnumValueMultiValuesOptions struct { + Prefix string `json:"prefix,omitempty"` + Separator string `json:"separator,omitempty"` + Suffix string `json:"suffix,omitempty"` +} + +type ResourceQueryParameterEnumValue struct { + EnumOptions string `json:"enum_options,omitempty"` + Values []string `json:"values,omitempty"` + MultiValuesOptions *ResourceQueryParameterEnumValueMultiValuesOptions `json:"multi_values_options,omitempty"` +} + +type ResourceQueryParameterNumericValue struct { + Value int `json:"value"` +} + +type ResourceQueryParameterQueryBackedValueMultiValuesOptions struct { + Prefix string `json:"prefix,omitempty"` + Separator string `json:"separator,omitempty"` + Suffix string `json:"suffix,omitempty"` +} + +type ResourceQueryParameterQueryBackedValue struct { + QueryId string `json:"query_id"` + Values []string `json:"values,omitempty"` + MultiValuesOptions *ResourceQueryParameterQueryBackedValueMultiValuesOptions `json:"multi_values_options,omitempty"` +} + +type ResourceQueryParameterTextValue struct { + Value string `json:"value"` +} + +type ResourceQueryParameter struct { + Name string `json:"name"` + Title string `json:"title,omitempty"` + DateRangeValue *ResourceQueryParameterDateRangeValue `json:"date_range_value,omitempty"` + DateValue *ResourceQueryParameterDateValue `json:"date_value,omitempty"` + EnumValue *ResourceQueryParameterEnumValue `json:"enum_value,omitempty"` + NumericValue *ResourceQueryParameterNumericValue `json:"numeric_value,omitempty"` + QueryBackedValue *ResourceQueryParameterQueryBackedValue `json:"query_backed_value,omitempty"` + TextValue *ResourceQueryParameterTextValue `json:"text_value,omitempty"` +} + +type ResourceQuery struct { + ApplyAutoLimit bool `json:"apply_auto_limit,omitempty"` + Catalog string `json:"catalog,omitempty"` + CreateTime string `json:"create_time,omitempty"` + Description string `json:"description,omitempty"` + DisplayName string `json:"display_name"` + Id string `json:"id,omitempty"` + LastModifierUserName string `json:"last_modifier_user_name,omitempty"` + LifecycleState string `json:"lifecycle_state,omitempty"` + OwnerUserName string `json:"owner_user_name,omitempty"` + 
ParentPath string `json:"parent_path,omitempty"` + QueryText string `json:"query_text"` + RunAsMode string `json:"run_as_mode,omitempty"` + Schema string `json:"schema,omitempty"` + Tags []string `json:"tags,omitempty"` + UpdateTime string `json:"update_time,omitempty"` + WarehouseId string `json:"warehouse_id"` + Parameter []ResourceQueryParameter `json:"parameter,omitempty"` +} diff --git a/bundle/internal/tf/schema/resources.go b/bundle/internal/tf/schema/resources.go index 53f558df..ea5b618f 100644 --- a/bundle/internal/tf/schema/resources.go +++ b/bundle/internal/tf/schema/resources.go @@ -4,6 +4,7 @@ package schema type Resources struct { AccessControlRuleSet map[string]any `json:"databricks_access_control_rule_set,omitempty"` + Alert map[string]any `json:"databricks_alert,omitempty"` ArtifactAllowlist map[string]any `json:"databricks_artifact_allowlist,omitempty"` AutomaticClusterUpdateWorkspaceSetting map[string]any `json:"databricks_automatic_cluster_update_workspace_setting,omitempty"` AwsS3Mount map[string]any `json:"databricks_aws_s3_mount,omitempty"` @@ -17,6 +18,7 @@ type Resources struct { ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"` ComplianceSecurityProfileWorkspaceSetting map[string]any `json:"databricks_compliance_security_profile_workspace_setting,omitempty"` Connection map[string]any `json:"databricks_connection,omitempty"` + CustomAppIntegration map[string]any `json:"databricks_custom_app_integration,omitempty"` Dashboard map[string]any `json:"databricks_dashboard,omitempty"` DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"` DefaultNamespaceSetting map[string]any `json:"databricks_default_namespace_setting,omitempty"` @@ -68,6 +70,7 @@ type Resources struct { Pipeline map[string]any `json:"databricks_pipeline,omitempty"` Provider map[string]any `json:"databricks_provider,omitempty"` QualityMonitor map[string]any `json:"databricks_quality_monitor,omitempty"` + Query map[string]any `json:"databricks_query,omitempty"` Recipient map[string]any `json:"databricks_recipient,omitempty"` RegisteredModel map[string]any `json:"databricks_registered_model,omitempty"` Repo map[string]any `json:"databricks_repo,omitempty"` @@ -107,6 +110,7 @@ type Resources struct { func NewResources() *Resources { return &Resources{ AccessControlRuleSet: make(map[string]any), + Alert: make(map[string]any), ArtifactAllowlist: make(map[string]any), AutomaticClusterUpdateWorkspaceSetting: make(map[string]any), AwsS3Mount: make(map[string]any), @@ -120,6 +124,7 @@ func NewResources() *Resources { ClusterPolicy: make(map[string]any), ComplianceSecurityProfileWorkspaceSetting: make(map[string]any), Connection: make(map[string]any), + CustomAppIntegration: make(map[string]any), Dashboard: make(map[string]any), DbfsFile: make(map[string]any), DefaultNamespaceSetting: make(map[string]any), @@ -171,6 +176,7 @@ func NewResources() *Resources { Pipeline: make(map[string]any), Provider: make(map[string]any), QualityMonitor: make(map[string]any), + Query: make(map[string]any), Recipient: make(map[string]any), RegisteredModel: make(map[string]any), Repo: make(map[string]any), diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index 7a0cc01f..7ccb7a0f 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -21,7 +21,7 @@ type Root struct { const ProviderHost = "registry.terraform.io" const ProviderSource = "databricks/databricks" -const ProviderVersion = "1.53.0" +const ProviderVersion = "1.58.0" func 
NewRoot() *Root { return &Root{ diff --git a/bundle/libraries/workspace_path.go b/bundle/libraries/path.go similarity index 75% rename from bundle/libraries/workspace_path.go rename to bundle/libraries/path.go index 126ad3f1..418d9ca7 100644 --- a/bundle/libraries/workspace_path.go +++ b/bundle/libraries/path.go @@ -36,3 +36,13 @@ func IsWorkspaceLibrary(library *compute.Library) bool { return IsWorkspacePath(path) } + +// IsVolumesPath returns true if the specified path indicates that +// it should be interpreted as a Databricks Volumes path. +func IsVolumesPath(path string) bool { + return strings.HasPrefix(path, "/Volumes/") +} + +// IsWorkspaceSharedPath returns true if the specified path is under the workspace's shared folder (/Workspace/Shared/). +func IsWorkspaceSharedPath(path string) bool { + return strings.HasPrefix(path, "/Workspace/Shared/") +} diff --git a/bundle/libraries/workspace_path_test.go b/bundle/libraries/path_test.go similarity index 77% rename from bundle/libraries/workspace_path_test.go rename to bundle/libraries/path_test.go index feaaab7f..90fe187a 100644 --- a/bundle/libraries/workspace_path_test.go +++ b/bundle/libraries/path_test.go @@ -31,3 +31,13 @@ func TestIsWorkspaceLibrary(t *testing.T) { // Empty. assert.False(t, IsWorkspaceLibrary(&compute.Library{})) } + +func TestIsVolumesPath(t *testing.T) { + // Absolute paths with particular prefixes. + assert.True(t, IsVolumesPath("/Volumes/path/to/package")) + + // Relative paths. + assert.False(t, IsVolumesPath("myfile.txt")) + assert.False(t, IsVolumesPath("./myfile.txt")) + assert.False(t, IsVolumesPath("../myfile.txt")) +} diff --git a/bundle/paths/paths.go b/bundle/paths/paths.go new file mode 100644 index 00000000..e3a6b0ae --- /dev/null +++ b/bundle/paths/paths.go @@ -0,0 +1,39 @@ +package paths + +import ( + "strings" + + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/libraries" +) + +// CollectUniqueWorkspacePathPrefixes returns the workspace path prefixes used by the bundle: +// the root path plus any of the artifact, file, state, and resource paths that are not nested +// under it. Volumes paths are excluded. +func CollectUniqueWorkspacePathPrefixes(workspace config.Workspace) []string { + rootPath := workspace.RootPath + paths := []string{} + if !libraries.IsVolumesPath(rootPath) { + paths = append(paths, rootPath) + } + + if !strings.HasSuffix(rootPath, "/") { + rootPath += "/" + } + + for _, p := range []string{ + workspace.ArtifactPath, + workspace.FilePath, + workspace.StatePath, + workspace.ResourcePath, + } { + if libraries.IsVolumesPath(p) { + continue + } + + if strings.HasPrefix(p, rootPath) { + continue + } + + paths = append(paths, p) + } + + return paths +} diff --git a/bundle/permissions/mutator.go b/bundle/permissions/mutator.go index 7787bc04..bc1392d9 100644 --- a/bundle/permissions/mutator.go +++ b/bundle/permissions/mutator.go @@ -39,6 +39,10 @@ var levelsMap = map[string](map[string]string){ CAN_VIEW: "CAN_VIEW", CAN_RUN: "CAN_QUERY", }, + "dashboards": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_READ", + }, } type bundlePermissions struct{} diff --git a/bundle/permissions/validate.go b/bundle/permissions/validate.go index acd2e606..f1a18f43 100644 --- a/bundle/permissions/validate.go +++ b/bundle/permissions/validate.go @@ -3,9 +3,9 @@ package permissions import ( "context" "fmt" - "strings" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/libraries" "github.com/databricks/cli/libs/diag" ) @@ -21,17 +21,13 @@ func (*validateSharedRootPermissions) Name() string { } func (*validateSharedRootPermissions) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - if isWorkspaceSharedRoot(b.Config.Workspace.RootPath) { + if libraries.IsWorkspaceSharedPath(b.Config.Workspace.RootPath) { return isUsersGroupPermissionSet(b) } return nil } -func isWorkspaceSharedRoot(path
string) bool { - return strings.HasPrefix(path, "/Workspace/Shared/") -} - // isUsersGroupPermissionSet checks that top-level permissions set for the bundle contain group_name: users with CAN_MANAGE permission. func isUsersGroupPermissionSet(b *bundle.Bundle) diag.Diagnostics { var diags diag.Diagnostics diff --git a/bundle/permissions/workspace_path_permissions.go b/bundle/permissions/workspace_path_permissions.go new file mode 100644 index 00000000..a3b4424c --- /dev/null +++ b/bundle/permissions/workspace_path_permissions.go @@ -0,0 +1,89 @@ +package permissions + +import ( + "fmt" + "strings" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/databricks-sdk-go/service/workspace" +) + +type WorkspacePathPermissions struct { + Path string + Permissions []resources.Permission +} + +func ObjectAclToResourcePermissions(path string, acl []workspace.WorkspaceObjectAccessControlResponse) *WorkspacePathPermissions { + permissions := make([]resources.Permission, 0) + for _, a := range acl { + // Skip the admin group because it's added to all resources by default. + if a.GroupName == "admins" { + continue + } + + for _, pl := range a.AllPermissions { + permissions = append(permissions, resources.Permission{ + Level: convertWorkspaceObjectPermissionLevel(pl.PermissionLevel), + GroupName: a.GroupName, + UserName: a.UserName, + ServicePrincipalName: a.ServicePrincipalName, + }) + } + } + + return &WorkspacePathPermissions{Permissions: permissions, Path: path} +} + +func (p WorkspacePathPermissions) Compare(perms []resources.Permission) diag.Diagnostics { + var diags diag.Diagnostics + + // Check the permissions in the workspace and see if they are all set in the bundle. + ok, missing := containsAll(p.Permissions, perms) + if !ok { + diags = diags.Append(diag.Diagnostic{ + Severity: diag.Warning, + Summary: "untracked permissions apply to target workspace path", + Detail: fmt.Sprintf("The following permissions apply to the workspace folder at %q but are not configured in the bundle:\n%s", p.Path, toString(missing)), + }) + } + + return diags +} + +// containsAll checks if all permissions in permA are also present in permB. +func containsAll(permA []resources.Permission, permB []resources.Permission) (bool, []resources.Permission) { + missing := make([]resources.Permission, 0) + for _, a := range permA { + found := false + for _, b := range permB { + if a == b { + found = true + break + } + } + if !found { + missing = append(missing, a) + } + } + return len(missing) == 0, missing +} + +// convertWorkspaceObjectPermissionLevel converts matching object permission levels to bundle ones. +// If there is no matching permission level, it returns the permission level as-is, for example, CAN_EDIT.
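+// Only the CAN_READ workspace level currently has a bundle equivalent (CAN_VIEW); all other levels fall through the default case unchanged.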
+func convertWorkspaceObjectPermissionLevel(level workspace.WorkspaceObjectPermissionLevel) string { + switch level { + case workspace.WorkspaceObjectPermissionLevelCanRead: + return CAN_VIEW + default: + return string(level) + } +} + +func toString(p []resources.Permission) string { + var sb strings.Builder + for _, perm := range p { + sb.WriteString(fmt.Sprintf("- %s\n", perm.String())) + } + return sb.String() +} diff --git a/bundle/permissions/workspace_path_permissions_test.go b/bundle/permissions/workspace_path_permissions_test.go new file mode 100644 index 00000000..0bb00474 --- /dev/null +++ b/bundle/permissions/workspace_path_permissions_test.go @@ -0,0 +1,121 @@ +package permissions + +import ( + "testing" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/databricks-sdk-go/service/workspace" + "github.com/stretchr/testify/require" +) + +func TestWorkspacePathPermissionsCompare(t *testing.T) { + testCases := []struct { + perms []resources.Permission + acl []workspace.WorkspaceObjectAccessControlResponse + expected diag.Diagnostics + }{ + { + perms: []resources.Permission{ + {Level: CAN_MANAGE, UserName: "foo@bar.com"}, + }, + acl: []workspace.WorkspaceObjectAccessControlResponse{ + { + UserName: "foo@bar.com", + AllPermissions: []workspace.WorkspaceObjectPermission{ + {PermissionLevel: "CAN_MANAGE"}, + }, + }, + }, + expected: nil, + }, + { + perms: []resources.Permission{ + {Level: CAN_MANAGE, UserName: "foo@bar.com"}, + }, + acl: []workspace.WorkspaceObjectAccessControlResponse{ + { + UserName: "foo@bar.com", + AllPermissions: []workspace.WorkspaceObjectPermission{ + {PermissionLevel: "CAN_MANAGE"}, + }, + }, + { + GroupName: "admins", + AllPermissions: []workspace.WorkspaceObjectPermission{ + {PermissionLevel: "CAN_MANAGE"}, + }, + }, + }, + expected: nil, + }, + { + perms: []resources.Permission{ + {Level: CAN_VIEW, UserName: "foo@bar.com"}, + {Level: CAN_MANAGE, ServicePrincipalName: "sp.com"}, + }, + acl: []workspace.WorkspaceObjectAccessControlResponse{ + { + UserName: "foo@bar.com", + AllPermissions: []workspace.WorkspaceObjectPermission{ + {PermissionLevel: "CAN_READ"}, + }, + }, + }, + expected: nil, + }, + { + perms: []resources.Permission{ + {Level: CAN_MANAGE, UserName: "foo@bar.com"}, + }, + acl: []workspace.WorkspaceObjectAccessControlResponse{ + { + UserName: "foo@bar.com", + AllPermissions: []workspace.WorkspaceObjectPermission{ + {PermissionLevel: "CAN_MANAGE"}, + }, + }, + { + GroupName: "foo", + AllPermissions: []workspace.WorkspaceObjectPermission{ + {PermissionLevel: "CAN_MANAGE"}, + }, + }, + }, + expected: diag.Diagnostics{ + { + Severity: diag.Warning, + Summary: "untracked permissions apply to target workspace path", + Detail: "The following permissions apply to the workspace folder at \"path\" but are not configured in the bundle:\n- level: CAN_MANAGE, group_name: foo\n", + }, + }, + }, + { + perms: []resources.Permission{ + {Level: CAN_MANAGE, UserName: "foo@bar.com"}, + }, + acl: []workspace.WorkspaceObjectAccessControlResponse{ + { + UserName: "foo2@bar.com", + AllPermissions: []workspace.WorkspaceObjectPermission{ + {PermissionLevel: "CAN_MANAGE"}, + }, + }, + }, + expected: diag.Diagnostics{ + { + Severity: diag.Warning, + Summary: "untracked permissions apply to target workspace path", + Detail: "The following permissions apply to the workspace folder at \"path\" but are not configured in the bundle:\n- level: CAN_MANAGE, user_name: foo2@bar.com\n", + }, + }, + }, + } + + for _, 
tc := range testCases { + wp := ObjectAclToResourcePermissions("path", tc.acl) + diags := wp.Compare(tc.perms) + require.Equal(t, tc.expected, diags) + } + +} diff --git a/bundle/permissions/workspace_root.go b/bundle/permissions/workspace_root.go index e7867521..de4f3a7f 100644 --- a/bundle/permissions/workspace_root.go +++ b/bundle/permissions/workspace_root.go @@ -5,8 +5,11 @@ import ( "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/libraries" + "github.com/databricks/cli/bundle/paths" "github.com/databricks/cli/libs/diag" "github.com/databricks/databricks-sdk-go/service/workspace" + "golang.org/x/sync/errgroup" ) type workspaceRootPermissions struct { @@ -34,7 +37,7 @@ func giveAccessForWorkspaceRoot(ctx context.Context, b *bundle.Bundle) error { permissions := make([]workspace.WorkspaceObjectAccessControlRequest, 0) for _, p := range b.Config.Permissions { - level, err := getWorkspaceObjectPermissionLevel(p.Level) + level, err := GetWorkspaceObjectPermissionLevel(p.Level) if err != nil { return err } @@ -52,20 +55,39 @@ func giveAccessForWorkspaceRoot(ctx context.Context, b *bundle.Bundle) error { } w := b.WorkspaceClient().Workspace - obj, err := w.GetStatusByPath(ctx, b.Config.Workspace.RootPath) + bundlePaths := paths.CollectUniqueWorkspacePathPrefixes(b.Config.Workspace) + + g, ctx := errgroup.WithContext(ctx) + for _, p := range bundlePaths { + g.Go(func() error { + return setPermissions(ctx, w, p, permissions) + }) + } + + return g.Wait() +} + +func setPermissions(ctx context.Context, w workspace.WorkspaceInterface, path string, permissions []workspace.WorkspaceObjectAccessControlRequest) error { + // If the folder is shared, we don't need to set permissions: they are always set for all users, and earlier mutators have already validated this.
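+ // For example, a root like /Workspace/Shared/bundles/my_bundle is skipped here, while paths under /Users are processed as usual.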
+ if libraries.IsWorkspaceSharedPath(path) { + return nil + } + + obj, err := w.GetStatusByPath(ctx, path) if err != nil { return err } - _, err = w.UpdatePermissions(ctx, workspace.WorkspaceObjectPermissionsRequest{ + _, err = w.SetPermissions(ctx, workspace.WorkspaceObjectPermissionsRequest{ WorkspaceObjectId: fmt.Sprint(obj.ObjectId), WorkspaceObjectType: "directories", AccessControlList: permissions, }) + return err } -func getWorkspaceObjectPermissionLevel(bundlePermission string) (workspace.WorkspaceObjectPermissionLevel, error) { +func GetWorkspaceObjectPermissionLevel(bundlePermission string) (workspace.WorkspaceObjectPermissionLevel, error) { switch bundlePermission { case CAN_MANAGE: return workspace.WorkspaceObjectPermissionLevelCanManage, nil diff --git a/bundle/permissions/workspace_root_test.go b/bundle/permissions/workspace_root_test.go index 6b37b2c4..c48704a6 100644 --- a/bundle/permissions/workspace_root_test.go +++ b/bundle/permissions/workspace_root_test.go @@ -21,7 +21,11 @@ func TestApplyWorkspaceRootPermissions(t *testing.T) { b := &bundle.Bundle{ Config: config.Root{ Workspace: config.Workspace{ - RootPath: "/Users/foo@bar.com", + RootPath: "/Users/foo@bar.com", + ArtifactPath: "/Users/foo@bar.com/artifacts", + FilePath: "/Users/foo@bar.com/files", + StatePath: "/Users/foo@bar.com/state", + ResourcePath: "/Users/foo@bar.com/resources", }, Permissions: []resources.Permission{ {Level: CAN_MANAGE, UserName: "TestUser"}, @@ -59,7 +63,7 @@ func TestApplyWorkspaceRootPermissions(t *testing.T) { workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Users/foo@bar.com").Return(&workspace.ObjectInfo{ ObjectId: 1234, }, nil) - workspaceApi.EXPECT().UpdatePermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{ + workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{ AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{ {UserName: "TestUser", PermissionLevel: "CAN_MANAGE"}, {GroupName: "TestGroup", PermissionLevel: "CAN_READ"}, @@ -72,3 +76,116 @@ func TestApplyWorkspaceRootPermissions(t *testing.T) { diags := bundle.Apply(context.Background(), b, bundle.Seq(ValidateSharedRootPermissions(), ApplyWorkspaceRootPermissions())) require.Empty(t, diags) } + +func TestApplyWorkspaceRootPermissionsForAllPaths(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + RootPath: "/Some/Root/Path", + ArtifactPath: "/Users/foo@bar.com/artifacts", + FilePath: "/Users/foo@bar.com/files", + StatePath: "/Users/foo@bar.com/state", + ResourcePath: "/Users/foo@bar.com/resources", + }, + Permissions: []resources.Permission{ + {Level: CAN_MANAGE, UserName: "TestUser"}, + {Level: CAN_VIEW, GroupName: "TestGroup"}, + {Level: CAN_RUN, ServicePrincipalName: "TestServicePrincipal"}, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job_1": {JobSettings: &jobs.JobSettings{Name: "job_1"}}, + "job_2": {JobSettings: &jobs.JobSettings{Name: "job_2"}}, + }, + Pipelines: map[string]*resources.Pipeline{ + "pipeline_1": {PipelineSpec: &pipelines.PipelineSpec{}}, + "pipeline_2": {PipelineSpec: &pipelines.PipelineSpec{}}, + }, + Models: map[string]*resources.MlflowModel{ + "model_1": {Model: &ml.Model{}}, + "model_2": {Model: &ml.Model{}}, + }, + Experiments: map[string]*resources.MlflowExperiment{ + "experiment_1": {Experiment: &ml.Experiment{}}, + "experiment_2": {Experiment: &ml.Experiment{}}, + }, + ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{ + "endpoint_1": 
{CreateServingEndpoint: &serving.CreateServingEndpoint{}}, + "endpoint_2": {CreateServingEndpoint: &serving.CreateServingEndpoint{}}, + }, + }, + }, + } + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + workspaceApi := m.GetMockWorkspaceAPI() + workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Some/Root/Path").Return(&workspace.ObjectInfo{ + ObjectId: 1, + }, nil) + workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Users/foo@bar.com/artifacts").Return(&workspace.ObjectInfo{ + ObjectId: 2, + }, nil) + workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Users/foo@bar.com/files").Return(&workspace.ObjectInfo{ + ObjectId: 3, + }, nil) + workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Users/foo@bar.com/state").Return(&workspace.ObjectInfo{ + ObjectId: 4, + }, nil) + workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Users/foo@bar.com/resources").Return(&workspace.ObjectInfo{ + ObjectId: 5, + }, nil) + + workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{ + AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{ + {UserName: "TestUser", PermissionLevel: "CAN_MANAGE"}, + {GroupName: "TestGroup", PermissionLevel: "CAN_READ"}, + {ServicePrincipalName: "TestServicePrincipal", PermissionLevel: "CAN_RUN"}, + }, + WorkspaceObjectId: "1", + WorkspaceObjectType: "directories", + }).Return(nil, nil) + + workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{ + AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{ + {UserName: "TestUser", PermissionLevel: "CAN_MANAGE"}, + {GroupName: "TestGroup", PermissionLevel: "CAN_READ"}, + {ServicePrincipalName: "TestServicePrincipal", PermissionLevel: "CAN_RUN"}, + }, + WorkspaceObjectId: "2", + WorkspaceObjectType: "directories", + }).Return(nil, nil) + + workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{ + AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{ + {UserName: "TestUser", PermissionLevel: "CAN_MANAGE"}, + {GroupName: "TestGroup", PermissionLevel: "CAN_READ"}, + {ServicePrincipalName: "TestServicePrincipal", PermissionLevel: "CAN_RUN"}, + }, + WorkspaceObjectId: "3", + WorkspaceObjectType: "directories", + }).Return(nil, nil) + + workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{ + AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{ + {UserName: "TestUser", PermissionLevel: "CAN_MANAGE"}, + {GroupName: "TestGroup", PermissionLevel: "CAN_READ"}, + {ServicePrincipalName: "TestServicePrincipal", PermissionLevel: "CAN_RUN"}, + }, + WorkspaceObjectId: "4", + WorkspaceObjectType: "directories", + }).Return(nil, nil) + + workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{ + AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{ + {UserName: "TestUser", PermissionLevel: "CAN_MANAGE"}, + {GroupName: "TestGroup", PermissionLevel: "CAN_READ"}, + {ServicePrincipalName: "TestServicePrincipal", PermissionLevel: "CAN_RUN"}, + }, + WorkspaceObjectId: "5", + WorkspaceObjectType: "directories", + }).Return(nil, nil) + + diags := bundle.Apply(context.Background(), b, ApplyWorkspaceRootPermissions()) + require.NoError(t, diags.Error()) +} diff --git a/bundle/phases/deploy.go b/bundle/phases/deploy.go index cb0ecf75..e623c364 100644 --- a/bundle/phases/deploy.go +++ b/bundle/phases/deploy.go @@ -152,6 +152,7 @@ func 
Deploy(outputHandler sync.OutputHandler) bundle.Mutator { bundle.Defer( bundle.Seq( terraform.StatePull(), + terraform.CheckDashboardsModifiedRemotely(), deploy.StatePull(), mutator.ValidateGitDetails(), artifacts.CleanUp(), diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index 5582016f..3d5ad5e8 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -66,6 +66,7 @@ func Initialize() bundle.Mutator { permissions.PermissionDiagnostics(), mutator.SetRunAs(), mutator.OverrideCompute(), + mutator.ConfigureDashboardDefaults(), mutator.ProcessTargetMode(), mutator.ApplyPresets(), mutator.DefaultQueueing(), diff --git a/bundle/resources/completion.go b/bundle/resources/completion.go new file mode 100644 index 00000000..c1bcd022 --- /dev/null +++ b/bundle/resources/completion.go @@ -0,0 +1,17 @@ +package resources + +import "github.com/databricks/cli/bundle" + +// Completions returns the same as [References] except +// that every key maps directly to a single reference. +func Completions(b *bundle.Bundle, filters ...Filter) map[string]Reference { + out := make(map[string]Reference) + keyOnlyRefs, _ := References(b, filters...) + for k, refs := range keyOnlyRefs { + if len(refs) != 1 { + continue + } + out[k] = refs[0] + } + return out +} diff --git a/bundle/resources/completion_test.go b/bundle/resources/completion_test.go new file mode 100644 index 00000000..2f7942aa --- /dev/null +++ b/bundle/resources/completion_test.go @@ -0,0 +1,58 @@ +package resources + +import ( + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/stretchr/testify/assert" +) + +func TestCompletions_SkipDuplicates(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "foo": {}, + "bar": {}, + }, + Pipelines: map[string]*resources.Pipeline{ + "foo": {}, + }, + }, + }, + } + + // Test that this skips duplicates and only includes unambiguous completions. + out := Completions(b) + if assert.Len(t, out, 1) { + assert.Contains(t, out, "bar") + } +} + +func TestCompletions_Filter(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "foo": {}, + }, + Pipelines: map[string]*resources.Pipeline{ + "bar": {}, + }, + }, + }, + } + + includeJobs := func(ref Reference) bool { + _, ok := ref.Resource.(*resources.Job) + return ok + } + + // Test that this does not include the pipeline. + out := Completions(b, includeJobs) + if assert.Len(t, out, 1) { + assert.Contains(t, out, "foo") + } +} diff --git a/bundle/resources/lookup.go b/bundle/resources/lookup.go new file mode 100644 index 00000000..48d86223 --- /dev/null +++ b/bundle/resources/lookup.go @@ -0,0 +1,98 @@ +package resources + +import ( + "fmt" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" +) + +// Reference is a reference to a resource. +// It includes the resource type description, and a reference to the resource itself. +type Reference struct { + // Key is the unique key of the resource, e.g. "my_job". + Key string + + // KeyWithType is the unique key of the resource, including the resource type, e.g. "jobs.my_job". + KeyWithType string + + // Description is the resource type description. + Description config.ResourceDescription + + // Resource is the resource itself. 
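+ // For example, entries under the top-level "jobs" mapping carry a *resources.Job here, and entries under "pipelines" a *resources.Pipeline.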
+ Resource config.ConfigResource } + +// Map is the core type for resource lookup and completion. +type Map map[string][]Reference + +// Filter defines the function signature for filtering resources. +type Filter func(Reference) bool + +// includeReference checks if the specified reference passes all filters. +// If the list of filters is empty, the reference is always included. +func includeReference(filters []Filter, ref Reference) bool { + for _, filter := range filters { + if !filter(ref) { + return false + } + } + return true +} + +// References returns maps of resource keys to a slice of [Reference]. +// +// The first map is indexed by the resource key only. +// The second map is indexed by the resource type name and its key. +// +// While the return types allow for multiple resources to share the same key, +// this is confirmed not to happen in the [validate.UniqueResourceKeys] mutator. +func References(b *bundle.Bundle, filters ...Filter) (Map, Map) { + keyOnly := make(Map) + keyWithType := make(Map) + + // Collect map of resource references indexed by their keys. + for _, group := range b.Config.Resources.AllResources() { + for k, v := range group.Resources { + ref := Reference{ + Key: k, + KeyWithType: fmt.Sprintf("%s.%s", group.Description.PluralName, k), + Description: group.Description, + Resource: v, + } + + // Skip resources that do not pass all filters. + if !includeReference(filters, ref) { + continue + } + + keyOnly[ref.Key] = append(keyOnly[ref.Key], ref) + keyWithType[ref.KeyWithType] = append(keyWithType[ref.KeyWithType], ref) + } + } + + return keyOnly, keyWithType +} + +// Lookup returns the resource with the specified key. +// If the key maps to more than one resource, an error is returned. +// If the key does not map to any resource, an error is returned. +func Lookup(b *bundle.Bundle, key string, filters ...Filter) (Reference, error) { + keyOnlyRefs, keyWithTypeRefs := References(b, filters...)
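+ // Prefer a match on the bare key (e.g. "my_job"); if there is none, fall back to the type-qualified key (e.g. "jobs.my_job").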
+ refs, ok := keyOnlyRefs[key] + if !ok { + refs, ok = keyWithTypeRefs[key] + if !ok { + return Reference{}, fmt.Errorf("resource with key %q not found", key) + } + } + + switch { + case len(refs) == 1: + return refs[0], nil + case len(refs) > 1: + return Reference{}, fmt.Errorf("multiple resources with key %q found", key) + default: + panic("unreachable") + } +} diff --git a/bundle/resources/lookup_test.go b/bundle/resources/lookup_test.go new file mode 100644 index 00000000..b2eaafd1 --- /dev/null +++ b/bundle/resources/lookup_test.go @@ -0,0 +1,117 @@ +package resources + +import ( + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLookup_EmptyBundle(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{}, + }, + } + + _, err := Lookup(b, "foo") + require.Error(t, err) + assert.ErrorContains(t, err, "resource with key \"foo\" not found") +} + +func TestLookup_NotFound(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "foo": {}, + "bar": {}, + }, + }, + }, + } + + _, err := Lookup(b, "qux") + require.Error(t, err) + assert.ErrorContains(t, err, `resource with key "qux" not found`) +} + +func TestLookup_MultipleFound(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "foo": {}, + }, + Pipelines: map[string]*resources.Pipeline{ + "foo": {}, + }, + }, + }, + } + + _, err := Lookup(b, "foo") + require.Error(t, err) + assert.ErrorContains(t, err, `multiple resources with key "foo" found`) +} + +func TestLookup_Nominal(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "foo": { + JobSettings: &jobs.JobSettings{ + Name: "Foo job", + }, + }, + }, + }, + }, + } + + // Lookup by key only. + out, err := Lookup(b, "foo") + if assert.NoError(t, err) { + assert.Equal(t, "Foo job", out.Resource.GetName()) + } + + // Lookup by type and key. + out, err = Lookup(b, "jobs.foo") + if assert.NoError(t, err) { + assert.Equal(t, "Foo job", out.Resource.GetName()) + } +} + +func TestLookup_NominalWithFilters(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "foo": {}, + }, + Pipelines: map[string]*resources.Pipeline{ + "bar": {}, + }, + }, + }, + } + + includeJobs := func(ref Reference) bool { + _, ok := ref.Resource.(*resources.Job) + return ok + } + + // This should succeed because the filter includes jobs. + _, err := Lookup(b, "foo", includeJobs) + require.NoError(t, err) + + // This should fail because the filter excludes pipelines. + _, err = Lookup(b, "bar", includeJobs) + require.Error(t, err) + assert.ErrorContains(t, err, `resource with key "bar" not found`) +} diff --git a/bundle/run/job.go b/bundle/run/job.go index 8003c7d2..340af961 100644 --- a/bundle/run/job.go +++ b/bundle/run/job.go @@ -317,6 +317,29 @@ func (r *jobRunner) Cancel(ctx context.Context) error { return errGroup.Wait() } +func (r *jobRunner) Restart(ctx context.Context, opts *Options) (output.RunOutput, error) { + // We don't need to cancel existing runs if the job is continuous and unpaused. 
+ // The /jobs/run-now API will automatically cancel any existing runs before starting a new one. + // + // /jobs/run-now will not cancel existing runs if the job is continuous and paused. + // New job runs will be queued instead and will wait for existing runs to finish. + // In this case, we need to cancel the existing runs before starting a new one. + continuous := r.job.JobSettings.Continuous + if continuous != nil && continuous.PauseStatus == jobs.PauseStatusUnpaused { + return r.Run(ctx, opts) + } + + s := cmdio.Spinner(ctx) + s <- "Cancelling all active job runs" + err := r.Cancel(ctx) + close(s) + if err != nil { + return nil, err + } + + return r.Run(ctx, opts) +} + func (r *jobRunner) ParseArgs(args []string, opts *Options) error { return r.posArgsHandler().ParseArgs(args, opts) } diff --git a/bundle/run/job_test.go b/bundle/run/job_test.go index be189306..369c546a 100644 --- a/bundle/run/job_test.go +++ b/bundle/run/job_test.go @@ -1,6 +1,7 @@ package run import ( + "bytes" "context" "testing" "time" @@ -8,6 +9,8 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/mock" @@ -126,3 +129,132 @@ func TestJobRunnerCancelWithNoActiveRuns(t *testing.T) { err := runner.Cancel(context.Background()) require.NoError(t, err) } + +func TestJobRunnerRestart(t *testing.T) { + for _, jobSettings := range []*jobs.JobSettings{ + {}, + { + Continuous: &jobs.Continuous{ + PauseStatus: jobs.PauseStatusPaused, + }, + }, + } { + job := &resources.Job{ + ID: "123", + JobSettings: jobSettings, + } + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "test_job": job, + }, + }, + }, + } + + runner := jobRunner{key: "test", bundle: b, job: job} + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + ctx := context.Background() + ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "")) + ctx = cmdio.NewContext(ctx, cmdio.NewLogger(flags.ModeAppend)) + + jobApi := m.GetMockJobsAPI() + jobApi.EXPECT().ListRunsAll(mock.Anything, jobs.ListRunsRequest{ + ActiveOnly: true, + JobId: 123, + }).Return([]jobs.BaseRun{ + {RunId: 1}, + {RunId: 2}, + }, nil) + + // Mock the runner cancelling existing job runs.
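+ // Cancel waits for each run to reach a terminated or skipped state, so the mocked Poll can return immediately.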
+ mockWait := &jobs.WaitGetRunJobTerminatedOrSkipped[struct{}]{ + Poll: func(time time.Duration, f func(j *jobs.Run)) (*jobs.Run, error) { + return nil, nil + }, + } + jobApi.EXPECT().CancelRun(mock.Anything, jobs.CancelRun{ + RunId: 1, + }).Return(mockWait, nil) + jobApi.EXPECT().CancelRun(mock.Anything, jobs.CancelRun{ + RunId: 2, + }).Return(mockWait, nil) + + // Mock the runner triggering a job run + mockWaitForRun := &jobs.WaitGetRunJobTerminatedOrSkipped[jobs.RunNowResponse]{ + Poll: func(d time.Duration, f func(*jobs.Run)) (*jobs.Run, error) { + return &jobs.Run{ + State: &jobs.RunState{ + ResultState: jobs.RunResultStateSuccess, + }, + }, nil + }, + } + jobApi.EXPECT().RunNow(mock.Anything, jobs.RunNow{ + JobId: 123, + }).Return(mockWaitForRun, nil) + + // Mock the runner getting the job output + jobApi.EXPECT().GetRun(mock.Anything, jobs.GetRunRequest{}).Return(&jobs.Run{}, nil) + + _, err := runner.Restart(ctx, &Options{}) + require.NoError(t, err) + } +} + +func TestJobRunnerRestartForContinuousUnpausedJobs(t *testing.T) { + job := &resources.Job{ + ID: "123", + JobSettings: &jobs.JobSettings{ + Continuous: &jobs.Continuous{ + PauseStatus: jobs.PauseStatusUnpaused, + }, + }, + } + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "test_job": job, + }, + }, + }, + } + + runner := jobRunner{key: "test", bundle: b, job: job} + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + ctx := context.Background() + ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) + ctx = cmdio.NewContext(ctx, cmdio.NewLogger(flags.ModeAppend)) + + jobApi := m.GetMockJobsAPI() + + // The runner should not try and cancel existing job runs for unpaused continuous jobs. + jobApi.AssertNotCalled(t, "ListRunsAll") + jobApi.AssertNotCalled(t, "CancelRun") + + // Mock the runner triggering a job run + mockWaitForRun := &jobs.WaitGetRunJobTerminatedOrSkipped[jobs.RunNowResponse]{ + Poll: func(d time.Duration, f func(*jobs.Run)) (*jobs.Run, error) { + return &jobs.Run{ + State: &jobs.RunState{ + ResultState: jobs.RunResultStateSuccess, + }, + }, nil + }, + } + jobApi.EXPECT().RunNow(mock.Anything, jobs.RunNow{ + JobId: 123, + }).Return(mockWaitForRun, nil) + + // Mock the runner getting the job output + jobApi.EXPECT().GetRun(mock.Anything, jobs.GetRunRequest{}).Return(&jobs.Run{}, nil) + + _, err := runner.Restart(ctx, &Options{}) + require.NoError(t, err) +} diff --git a/bundle/run/keys.go b/bundle/run/keys.go deleted file mode 100644 index 76ec50ac..00000000 --- a/bundle/run/keys.go +++ /dev/null @@ -1,69 +0,0 @@ -package run - -import ( - "fmt" - - "github.com/databricks/cli/bundle" - "golang.org/x/exp/maps" -) - -// RunnerLookup maps identifiers to a list of workloads that match that identifier. -// The list can have more than 1 entry if resources of different types use the -// same key. When this happens, the user should disambiguate between them. 
-type RunnerLookup map[string][]Runner - -// ResourceKeys computes a map with -func ResourceKeys(b *bundle.Bundle) (keyOnly RunnerLookup, keyWithType RunnerLookup) { - keyOnly = make(RunnerLookup) - keyWithType = make(RunnerLookup) - - r := b.Config.Resources - for k, v := range r.Jobs { - kt := fmt.Sprintf("jobs.%s", k) - w := jobRunner{key: key(kt), bundle: b, job: v} - keyOnly[k] = append(keyOnly[k], &w) - keyWithType[kt] = append(keyWithType[kt], &w) - } - for k, v := range r.Pipelines { - kt := fmt.Sprintf("pipelines.%s", k) - w := pipelineRunner{key: key(kt), bundle: b, pipeline: v} - keyOnly[k] = append(keyOnly[k], &w) - keyWithType[kt] = append(keyWithType[kt], &w) - } - return -} - -// ResourceCompletionMap returns a map of resource keys to their respective names. -func ResourceCompletionMap(b *bundle.Bundle) map[string]string { - out := make(map[string]string) - keyOnly, keyWithType := ResourceKeys(b) - - // Keep track of resources we have seen by their fully qualified key. - seen := make(map[string]bool) - - // First add resources that can be identified by key alone. - for k, v := range keyOnly { - // Invariant: len(v) >= 1. See [ResourceKeys]. - if len(v) == 1 { - seen[v[0].Key()] = true - out[k] = v[0].Name() - } - } - - // Then add resources that can only be identified by their type and key. - for k, v := range keyWithType { - // Invariant: len(v) == 1. See [ResourceKeys]. - _, ok := seen[v[0].Key()] - if ok { - continue - } - out[k] = v[0].Name() - } - - return out -} - -// ResourceCompletions returns a list of keys that unambiguously reference resources in the bundle. -func ResourceCompletions(b *bundle.Bundle) []string { - return maps.Keys(ResourceCompletionMap(b)) -} diff --git a/bundle/run/keys_test.go b/bundle/run/keys_test.go deleted file mode 100644 index 5ab73b13..00000000 --- a/bundle/run/keys_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package run - -import ( - "testing" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/resources" - "github.com/stretchr/testify/assert" -) - -func TestResourceCompletionsUnique(t *testing.T) { - b := &bundle.Bundle{ - Config: config.Root{ - Resources: config.Resources{ - Jobs: map[string]*resources.Job{ - "foo": {}, - "bar": {}, - }, - }, - }, - } - - assert.ElementsMatch(t, []string{"foo", "bar"}, ResourceCompletions(b)) -} diff --git a/bundle/run/pipeline.go b/bundle/run/pipeline.go index 092e6dcb..a2206483 100644 --- a/bundle/run/pipeline.go +++ b/bundle/run/pipeline.go @@ -178,6 +178,18 @@ func (r *pipelineRunner) Cancel(ctx context.Context) error { return err } +func (r *pipelineRunner) Restart(ctx context.Context, opts *Options) (output.RunOutput, error) { + s := cmdio.Spinner(ctx) + s <- "Cancelling the active pipeline update" + err := r.Cancel(ctx) + close(s) + if err != nil { + return nil, err + } + + return r.Run(ctx, opts) +} + func (r *pipelineRunner) ParseArgs(args []string, opts *Options) error { if len(args) == 0 { return nil diff --git a/bundle/run/pipeline_test.go b/bundle/run/pipeline_test.go index 29b57ffd..e4608061 100644 --- a/bundle/run/pipeline_test.go +++ b/bundle/run/pipeline_test.go @@ -1,6 +1,7 @@ package run import ( + "bytes" "context" "testing" "time" @@ -8,8 +9,12 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + sdk_config 
"github.com/databricks/databricks-sdk-go/config" "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) @@ -47,3 +52,68 @@ func TestPipelineRunnerCancel(t *testing.T) { err := runner.Cancel(context.Background()) require.NoError(t, err) } + +func TestPipelineRunnerRestart(t *testing.T) { + pipeline := &resources.Pipeline{ + ID: "123", + } + + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Pipelines: map[string]*resources.Pipeline{ + "test_pipeline": pipeline, + }, + }, + }, + } + + runner := pipelineRunner{key: "test", bundle: b, pipeline: pipeline} + + m := mocks.NewMockWorkspaceClient(t) + m.WorkspaceClient.Config = &sdk_config.Config{ + Host: "https://test.com", + } + b.SetWorkpaceClient(m.WorkspaceClient) + ctx := context.Background() + ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) + ctx = cmdio.NewContext(ctx, cmdio.NewLogger(flags.ModeAppend)) + + mockWait := &pipelines.WaitGetPipelineIdle[struct{}]{ + Poll: func(time.Duration, func(*pipelines.GetPipelineResponse)) (*pipelines.GetPipelineResponse, error) { + return nil, nil + }, + } + + pipelineApi := m.GetMockPipelinesAPI() + pipelineApi.EXPECT().Stop(mock.Anything, pipelines.StopRequest{ + PipelineId: "123", + }).Return(mockWait, nil) + + pipelineApi.EXPECT().GetByPipelineId(mock.Anything, "123").Return(&pipelines.GetPipelineResponse{}, nil) + + // Mock runner starting a new update + pipelineApi.EXPECT().StartUpdate(mock.Anything, pipelines.StartUpdate{ + PipelineId: "123", + }).Return(&pipelines.StartUpdateResponse{ + UpdateId: "456", + }, nil) + + // Mock runner polling for events + pipelineApi.EXPECT().ListPipelineEventsAll(mock.Anything, pipelines.ListPipelineEventsRequest{ + Filter: `update_id = '456'`, + MaxResults: 100, + PipelineId: "123", + }).Return([]pipelines.PipelineEvent{}, nil) + + // Mock runner polling for update status + pipelineApi.EXPECT().GetUpdateByPipelineIdAndUpdateId(mock.Anything, "123", "456"). + Return(&pipelines.GetUpdateResponse{ + Update: &pipelines.UpdateInfo{ + State: pipelines.UpdateInfoStateCompleted, + }, + }, nil) + + _, err := runner.Restart(ctx, &Options{}) + require.NoError(t, err) +} diff --git a/bundle/run/runner.go b/bundle/run/runner.go index 0f202ce7..4c907d06 100644 --- a/bundle/run/runner.go +++ b/bundle/run/runner.go @@ -3,9 +3,10 @@ package run import ( "context" "fmt" - "strings" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/resources" + refs "github.com/databricks/cli/bundle/resources" "github.com/databricks/cli/bundle/run/output" ) @@ -27,6 +28,10 @@ type Runner interface { // Run the underlying worklow. Run(ctx context.Context, opts *Options) (output.RunOutput, error) + // Restart the underlying workflow by cancelling any existing runs before + // starting a new one. + Restart(ctx context.Context, opts *Options) (output.RunOutput, error) + // Cancel the underlying workflow. Cancel(ctx context.Context) error @@ -34,34 +39,24 @@ type Runner interface { argsHandler } -// Find locates a runner matching the specified argument. -// -// Its behavior is as follows: -// 1. Try to find a resource with identical to the argument. -// 2. Try to find a resource with . identical to the argument. -// -// If an argument resolves to multiple resources, it returns an error. 
-func Find(b *bundle.Bundle, arg string) (Runner, error) { - keyOnly, keyWithType := ResourceKeys(b) - if len(keyWithType) == 0 { - return nil, fmt.Errorf("bundle defines no resources") +// IsRunnable returns a filter that only allows runnable resources. +func IsRunnable(ref refs.Reference) bool { + switch ref.Resource.(type) { + case *resources.Job, *resources.Pipeline: + return true + default: + return false + } +} + +// ToRunner converts a resource reference to a runnable resource. +func ToRunner(b *bundle.Bundle, ref refs.Reference) (Runner, error) { + switch resource := ref.Resource.(type) { + case *resources.Job: + return &jobRunner{key: key(ref.KeyWithType), bundle: b, job: resource}, nil + case *resources.Pipeline: + return &pipelineRunner{key: key(ref.KeyWithType), bundle: b, pipeline: resource}, nil + default: + return nil, fmt.Errorf("unsupported resource type: %T", resource) } - - runners, ok := keyOnly[arg] - if !ok { - runners, ok = keyWithType[arg] - if !ok { - return nil, fmt.Errorf("no such resource: %s", arg) - } - } - - if len(runners) != 1 { - var keys []string - for _, runner := range runners { - keys = append(keys, runner.Key()) - } - return nil, fmt.Errorf("ambiguous: %s (can resolve to all of %s)", arg, strings.Join(keys, ", ")) - } - - return runners[0], nil } diff --git a/bundle/run/runner_test.go b/bundle/run/runner_test.go index 85baa192..2fc5fa6f 100644 --- a/bundle/run/runner_test.go +++ b/bundle/run/runner_test.go @@ -3,82 +3,14 @@ package run import ( "testing" - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + refs "github.com/databricks/cli/bundle/resources" "github.com/stretchr/testify/assert" ) -func TestFindNoResources(t *testing.T) { - b := &bundle.Bundle{ - Config: config.Root{ - Resources: config.Resources{}, - }, - } - - _, err := Find(b, "foo") - assert.ErrorContains(t, err, "bundle defines no resources") -} - -func TestFindSingleArg(t *testing.T) { - b := &bundle.Bundle{ - Config: config.Root{ - Resources: config.Resources{ - Jobs: map[string]*resources.Job{ - "foo": {}, - }, - }, - }, - } - - _, err := Find(b, "foo") - assert.NoError(t, err) -} - -func TestFindSingleArgNotFound(t *testing.T) { - b := &bundle.Bundle{ - Config: config.Root{ - Resources: config.Resources{ - Jobs: map[string]*resources.Job{ - "foo": {}, - }, - }, - }, - } - - _, err := Find(b, "bar") - assert.ErrorContains(t, err, "no such resource: bar") -} - -func TestFindSingleArgAmbiguous(t *testing.T) { - b := &bundle.Bundle{ - Config: config.Root{ - Resources: config.Resources{ - Jobs: map[string]*resources.Job{ - "key": {}, - }, - Pipelines: map[string]*resources.Pipeline{ - "key": {}, - }, - }, - }, - } - - _, err := Find(b, "key") - assert.ErrorContains(t, err, "ambiguous: ") -} - -func TestFindSingleArgWithType(t *testing.T) { - b := &bundle.Bundle{ - Config: config.Root{ - Resources: config.Resources{ - Jobs: map[string]*resources.Job{ - "key": {}, - }, - }, - }, - } - - _, err := Find(b, "jobs.key") - assert.NoError(t, err) +func TestRunner_IsRunnable(t *testing.T) { + assert.True(t, IsRunnable(refs.Reference{Resource: &resources.Job{}})) + assert.True(t, IsRunnable(refs.Reference{Resource: &resources.Pipeline{}})) + assert.False(t, IsRunnable(refs.Reference{Resource: &resources.MlflowModel{}})) + assert.False(t, IsRunnable(refs.Reference{Resource: &resources.MlflowExperiment{}})) } diff --git a/bundle/schema/embed_test.go b/bundle/schema/embed_test.go index dcb381b8..e4b45baa 100644 --- 
a/bundle/schema/embed_test.go +++ b/bundle/schema/embed_test.go @@ -59,9 +59,14 @@ func TestJsonSchema(t *testing.T) { } // Assert enum values are loaded - schedule := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "catalog.MonitorCronSchedule") - assert.Contains(t, schedule.AnyOf[0].Properties["pause_status"].Enum, "PAUSED") - assert.Contains(t, schedule.AnyOf[0].Properties["pause_status"].Enum, "UNPAUSED") + schedule := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "pipelines.RestartWindow") + assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "MONDAY") + assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "TUESDAY") + assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "WEDNESDAY") + assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "THURSDAY") + assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "FRIDAY") + assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "SATURDAY") + assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "SUNDAY") providers := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "jobs.GitProvider") assert.Contains(t, providers.Enum, "gitHub") diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index 06b9cc15..703daafe 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -180,6 +180,69 @@ } ] }, + "resources.Dashboard": { + "anyOf": [ + { + "type": "object", + "properties": { + "create_time": { + "description": "The timestamp of when the dashboard was created.", + "$ref": "#/$defs/string" + }, + "dashboard_id": { + "description": "UUID identifying the dashboard.", + "$ref": "#/$defs/string" + }, + "display_name": { + "description": "The display name of the dashboard.", + "$ref": "#/$defs/string" + }, + "embed_credentials": { + "$ref": "#/$defs/bool" + }, + "etag": { + "description": "The etag for the dashboard. Can be optionally provided on updates to ensure that the dashboard\nhas not been modified since the last read.\nThis field is excluded in List Dashboards responses.", + "$ref": "#/$defs/string" + }, + "file_path": { + "$ref": "#/$defs/string" + }, + "lifecycle_state": { + "description": "The state of the dashboard resource. Used for tracking trashed status.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/dashboards.LifecycleState" + }, + "parent_path": { + "description": "The workspace path of the folder containing the dashboard. 
Includes leading slash and no\ntrailing slash.\nThis field is excluded in List Dashboards responses.", + "$ref": "#/$defs/string" + }, + "path": { + "description": "The workspace path of the dashboard asset, including the file name.\nExported dashboards always have the file extension `.lvdash.json`.\nThis field is excluded in List Dashboards responses.", + "$ref": "#/$defs/string" + }, + "permissions": { + "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission" + }, + "serialized_dashboard": { + "description": "The contents of the dashboard in serialized string form.\nThis field is excluded in List Dashboards responses.\nUse the [get dashboard API](https://docs.databricks.com/api/workspace/lakeview/get)\nto retrieve an example response, which includes the `serialized_dashboard` field.\nThis field provides the structure of the JSON string that represents the dashboard's\nlayout and components.", + "$ref": "#/$defs/interface" + }, + "update_time": { + "description": "The timestamp of when the dashboard was last updated by the user.\nThis field is excluded in List Dashboards responses.", + "$ref": "#/$defs/string" + }, + "warehouse_id": { + "description": "The warehouse ID used to run the dashboard.", + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "resources.Grant": { "anyOf": [ { @@ -209,6 +272,10 @@ { "type": "object", "properties": { + "budget_policy_id": { + "description": "The id of the user specified budget policy to use for this job.\nIf not specified, a default budget policy may be applied when creating or modifying the job.\nSee `effective_budget_policy_id` for the budget policy used by this workload.", + "$ref": "#/$defs/string" + }, "continuous": { "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.Continuous" @@ -505,7 +572,7 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.Filters" }, "gateway_definition": { - "description": "The definition of a gateway pipeline to support CDC.", + "description": "The definition of a gateway pipeline to support change data capture.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.IngestionGatewayPipelineDefinition" }, "id": { @@ -535,6 +602,10 @@ "description": "Whether Photon is enabled for this pipeline.", "$ref": "#/$defs/bool" }, + "restart_window": { + "description": "Restart window of this pipeline.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindow" + }, "schema": { "description": "The default schema (database) where tables are read from or published to. 
The presence of this field implies that the pipeline is in direct publishing mode.", "$ref": "#/$defs/string" @@ -613,6 +684,9 @@ "description": "Configuration for monitoring snapshot tables.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorSnapshot" }, + "table_name": { + "$ref": "#/$defs/string" + }, "time_series": { "description": "Configuration for monitoring time series tables.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorTimeSeries" @@ -624,6 +698,7 @@ }, "additionalProperties": false, "required": [ + "table_name", "assets_dir", "output_schema_name" ] @@ -1050,6 +1125,9 @@ "clusters": { "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Cluster" }, + "dashboards": { + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Dashboard" + }, "experiments": { "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.MlflowExperiment" }, @@ -1240,11 +1318,7 @@ "properties": { "pause_status": { "description": "Read only field that indicates whether a schedule is paused or not.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedulePauseStatus", - "enum": [ - "UNPAUSED", - "PAUSED" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedulePauseStatus" }, "quartz_cron_expression": { "description": "The expression that determines when to run the monitor. See [examples](https://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html).\n", @@ -1268,7 +1342,12 @@ ] }, "catalog.MonitorCronSchedulePauseStatus": { - "type": "string" + "type": "string", + "description": "Read only field that indicates whether a schedule is paused or not.", + "enum": [ + "UNPAUSED", + "PAUSED" + ] }, "catalog.MonitorDataClassificationConfig": { "anyOf": [ @@ -1333,11 +1412,7 @@ }, "problem_type": { "description": "Problem type the model aims to solve. Determines the type of model-quality metrics that will be computed.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorInferenceLogProblemType", - "enum": [ - "PROBLEM_TYPE_CLASSIFICATION", - "PROBLEM_TYPE_REGRESSION" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorInferenceLogProblemType" }, "timestamp_col": { "description": "Column that contains the timestamps of requests. The column must be one of the following:\n- A ``TimestampType`` column\n- A column whose values can be converted to timestamps through the pyspark\n ``to_timestamp`` [function](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html).\n", @@ -1360,7 +1435,12 @@ ] }, "catalog.MonitorInferenceLogProblemType": { - "type": "string" + "type": "string", + "description": "Problem type the model aims to solve. 
Determines the type of model-quality metrics that will be computed.", + "enum": [ + "PROBLEM_TYPE_CLASSIFICATION", + "PROBLEM_TYPE_REGRESSION" + ] }, "catalog.MonitorMetric": { "anyOf": [ @@ -1385,12 +1465,7 @@ }, "type": { "description": "Can only be one of ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"``, ``\"CUSTOM_METRIC_TYPE_DERIVED\"``, or ``\"CUSTOM_METRIC_TYPE_DRIFT\"``.\nThe ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"`` and ``\"CUSTOM_METRIC_TYPE_DERIVED\"`` metrics\nare computed on a single table, whereas the ``\"CUSTOM_METRIC_TYPE_DRIFT\"`` compare metrics across\nbaseline and input table, or across the two consecutive time windows.\n- CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table\n- CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics\n- CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics\n", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorMetricType", - "enum": [ - "CUSTOM_METRIC_TYPE_AGGREGATE", - "CUSTOM_METRIC_TYPE_DERIVED", - "CUSTOM_METRIC_TYPE_DRIFT" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorMetricType" } }, "additionalProperties": false, @@ -1409,7 +1484,13 @@ ] }, "catalog.MonitorMetricType": { - "type": "string" + "type": "string", + "description": "Can only be one of ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"``, ``\"CUSTOM_METRIC_TYPE_DERIVED\"``, or ``\"CUSTOM_METRIC_TYPE_DRIFT\"``.\nThe ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"`` and ``\"CUSTOM_METRIC_TYPE_DERIVED\"`` metrics\nare computed on a single table, whereas the ``\"CUSTOM_METRIC_TYPE_DRIFT\"`` compare metrics across\nbaseline and input table, or across the two consecutive time windows.\n- CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table\n- CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics\n- CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics\n", + "enum": [ + "CUSTOM_METRIC_TYPE_AGGREGATE", + "CUSTOM_METRIC_TYPE_DERIVED", + "CUSTOM_METRIC_TYPE_DRIFT" + ] }, "catalog.MonitorNotifications": { "anyOf": [ @@ -2276,6 +2357,13 @@ } ] }, + "dashboards.LifecycleState": { + "type": "string", + "enum": [ + "ACTIVE", + "TRASHED" + ] + }, "jobs.Condition": { "type": "string", "enum": [ @@ -3053,7 +3141,7 @@ "$ref": "#/$defs/slice/string" }, "jar_params": { - "description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\nUse [Task parameter variables](/jobs.html\\\"#parameter-variables\\\") to set parameters containing information about job runs.", + "description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\nUse [Task parameter 
variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.", "$ref": "#/$defs/slice/string" }, "job_id": { @@ -3387,11 +3475,11 @@ "type": "object", "properties": { "condition_task": { - "description": "If condition_task, specifies a condition with an outcome that can be used to control the execution of other tasks. Does not require a cluster to execute and does not support retries or notifications.", + "description": "The task evaluates a condition that can be used to control the execution of other tasks when the `condition_task` field is present.\nThe condition task does not require a cluster to execute and does not support retries or notifications.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.ConditionTask" }, "dbt_task": { - "description": "If dbt_task, indicates that this must execute a dbt task. It requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.", + "description": "The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.DbtTask" }, "depends_on": { @@ -3419,7 +3507,7 @@ "$ref": "#/$defs/string" }, "for_each_task": { - "description": "If for_each_task, indicates that this task must execute the nested task within it.", + "description": "The task executes a nested task for every input provided when the `for_each_task` field is present.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.ForEachTask" }, "health": { @@ -3446,7 +3534,7 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec" }, "notebook_task": { - "description": "If notebook_task, indicates that this task must run a notebook. This field may not be specified in conjunction with spark_jar_task.", + "description": "The task runs a notebook when the `notebook_task` field is present.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.NotebookTask" }, "notification_settings": { @@ -3454,11 +3542,11 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.TaskNotificationSettings" }, "pipeline_task": { - "description": "If pipeline_task, indicates that this task must execute a Pipeline.", + "description": "The task triggers a pipeline update when the `pipeline_task` field is present. 
Only pipelines configured to use triggered mode are supported.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.PipelineTask" }, "python_wheel_task": { - "description": "If python_wheel_task, indicates that this job must execute a PythonWheel.", + "description": "The task runs a Python wheel when the `python_wheel_task` field is present.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.PythonWheelTask" }, "retry_on_timeout": { @@ -3470,23 +3558,23 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.RunIf" }, "run_job_task": { - "description": "If run_job_task, indicates that this task must execute another job.", + "description": "The task triggers another job when the `run_job_task` field is present.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask" }, "spark_jar_task": { - "description": "If spark_jar_task, indicates that this task must run a JAR.", + "description": "The task runs a JAR when the `spark_jar_task` field is present.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkJarTask" }, "spark_python_task": { - "description": "If spark_python_task, indicates that this task must run a Python file.", + "description": "The task runs a Python file when the `spark_python_task` field is present.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkPythonTask" }, "spark_submit_task": { - "description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations.\n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.", + "description": "(Legacy) The task runs the spark-submit script when the `spark_submit_task` field is present. This task can run only on new clusters and is not compatible with serverless compute.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations.\n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). 
You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkSubmitTask" }, "sql_task": { - "description": "If sql_task, indicates that this job must execute a SQL task.", + "description": "The task runs a SQL query or file, or it refreshes a SQL alert or a legacy SQL dashboard when the `sql_task` field is present.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SqlTask" }, "task_key": { @@ -3772,12 +3860,7 @@ }, "status": { "description": "Current status of `model_version`", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/ml.ModelVersionStatus", - "enum": [ - "PENDING_REGISTRATION", - "FAILED_REGISTRATION", - "READY" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/ml.ModelVersionStatus" }, "status_message": { "description": "Details on current `status`, if it is pending or failed.", @@ -3805,7 +3888,13 @@ ] }, "ml.ModelVersionStatus": { - "type": "string" + "type": "string", + "description": "Current status of `model_version`", + "enum": [ + "PENDING_REGISTRATION", + "FAILED_REGISTRATION", + "READY" + ] }, "ml.ModelVersionTag": { "anyOf": [ @@ -3901,12 +3990,16 @@ { "type": "object", "properties": { + "report": { + "description": "Select a specific source report.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.ReportSpec" + }, "schema": { - "description": "Select tables from a specific source schema.", + "description": "Select all tables from a specific source schema.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.SchemaSpec" }, "table": { - "description": "Select tables from a specific source table.", + "description": "Select a specific source table.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.TableSpec" } }, @@ -3924,7 +4017,11 @@ "type": "object", "properties": { "connection_id": { - "description": "Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source.", + "description": "[Deprecated, use connection_name instead] Immutable. The Unity Catalog connection that this gateway pipeline uses to communicate with the source.", + "$ref": "#/$defs/string" + }, + "connection_name": { + "description": "Immutable. The Unity Catalog connection that this gateway pipeline uses to communicate with the source.", "$ref": "#/$defs/string" }, "gateway_storage_catalog": { @@ -3954,11 +4051,11 @@ "type": "object", "properties": { "connection_name": { - "description": "Immutable. The Unity Catalog connection this ingestion pipeline uses to communicate with the source. Specify either ingestion_gateway_id or connection_name.", + "description": "Immutable. The Unity Catalog connection that this ingestion pipeline uses to communicate with the source. This is used with connectors for applications like Salesforce, Workday, and so on.", "$ref": "#/$defs/string" }, "ingestion_gateway_id": { - "description": "Immutable. Identifier for the ingestion gateway used by this ingestion pipeline to communicate with the source. Specify either ingestion_gateway_id or connection_name.", + "description": "Immutable. Identifier for the gateway that is used by this ingestion pipeline to communicate with the source database. 
This is used with connectors to databases like SQL Server.", "$ref": "#/$defs/string" }, "objects": { @@ -4135,11 +4232,7 @@ }, "mode": { "description": "Databricks Enhanced Autoscaling optimizes cluster utilization by automatically\nallocating cluster resources based on workload volume, with minimal impact to\nthe data processing latency of your pipelines. Enhanced Autoscaling is available\nfor `updates` clusters only. The legacy autoscaling feature is used for `maintenance`\nclusters.\n", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.PipelineClusterAutoscaleMode", - "enum": [ - "ENHANCED", - "LEGACY" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.PipelineClusterAutoscaleMode" } }, "additionalProperties": false, @@ -4155,7 +4248,12 @@ ] }, "pipelines.PipelineClusterAutoscaleMode": { - "type": "string" + "type": "string", + "description": "Databricks Enhanced Autoscaling optimizes cluster utilization by automatically\nallocating cluster resources based on workload volume, with minimal impact to\nthe data processing latency of your pipelines. Enhanced Autoscaling is available\nfor `updates` clusters only. The legacy autoscaling feature is used for `maintenance`\nclusters.\n", + "enum": [ + "ENHANCED", + "LEGACY" + ] }, "pipelines.PipelineDeployment": { "anyOf": [ @@ -4233,6 +4331,81 @@ } ] }, + "pipelines.ReportSpec": { + "anyOf": [ + { + "type": "object", + "properties": { + "destination_catalog": { + "description": "Required. Destination catalog to store table.", + "$ref": "#/$defs/string" + }, + "destination_schema": { + "description": "Required. Destination schema to store table.", + "$ref": "#/$defs/string" + }, + "destination_table": { + "description": "Required. Destination table name. The pipeline fails if a table with that name already exists.", + "$ref": "#/$defs/string" + }, + "source_url": { + "description": "Required. Report URL in the source system.", + "$ref": "#/$defs/string" + }, + "table_configuration": { + "description": "Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the IngestionPipelineDefinition object.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfig" + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, + "pipelines.RestartWindow": { + "anyOf": [ + { + "type": "object", + "properties": { + "days_of_week": { + "description": "Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour).\nIf not specified all days of the week will be used.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindowDaysOfWeek", + "enum": [ + "MONDAY", + "TUESDAY", + "WEDNESDAY", + "THURSDAY", + "FRIDAY", + "SATURDAY", + "SUNDAY" + ] + }, + "start_hour": { + "description": "An integer between 0 and 23 denoting the start hour for the restart window in the 24-hour day.\nContinuous pipeline restart is triggered only within a five-hour window starting at this hour.", + "$ref": "#/$defs/int" + }, + "time_zone_id": { + "description": "Time zone id of restart window. 
See https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html for details.\nIf not specified, UTC will be used.", + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false, + "required": [ + "start_hour" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, + "pipelines.RestartWindowDaysOfWeek": { + "type": "string" + }, "pipelines.SchemaSpec": { "anyOf": [ { @@ -4281,7 +4454,7 @@ "$ref": "#/$defs/string" }, "destination_table": { - "description": "Optional. Destination table name. The pipeline fails If a table with that name already exists. If not set, the source table name is used.", + "description": "Optional. Destination table name. The pipeline fails if a table with that name already exists. If not set, the source table name is used.", "$ref": "#/$defs/string" }, "source_catalog": { @@ -4324,11 +4497,11 @@ }, "scd_type": { "description": "The SCD type to use to ingest the table.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfigScdType", - "enum": [ - "SCD_TYPE_1", - "SCD_TYPE_2" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfigScdType" + }, + "sequence_by": { + "description": "The column names specifying the logical order of events in the source data. Delta Live Tables uses this sequencing to handle change events that arrive out of order.", + "$ref": "#/$defs/slice/string" } }, "additionalProperties": false @@ -4340,7 +4513,12 @@ ] }, "pipelines.TableSpecificConfigScdType": { - "type": "string" + "type": "string", + "description": "The SCD type to use to ingest the table.", + "enum": [ + "SCD_TYPE_1", + "SCD_TYPE_2" + ] }, "serving.Ai21LabsConfig": { "anyOf": [ @@ -4429,11 +4607,7 @@ "properties": { "behavior": { "description": "Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehaviorBehavior", - "enum": [ - "NONE", - "BLOCK" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehaviorBehavior" } }, "additionalProperties": false, @@ -4448,7 +4622,12 @@ ] }, "serving.AiGatewayGuardrailPiiBehaviorBehavior": { - "type": "string" + "type": "string", + "description": "Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned.", + "enum": [ + "NONE", + "BLOCK" + ] }, "serving.AiGatewayGuardrails": { "anyOf": [ @@ -4513,18 +4692,11 @@ }, "key": { "description": "Key field for a rate limit. 
Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitKey", - "enum": [ - "user", - "endpoint" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitKey" }, "renewal_period": { "description": "Renewal period field for a rate limit. Currently, only 'minute' is supported.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitRenewalPeriod", - "enum": [ - "minute" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitRenewalPeriod" } }, "additionalProperties": false, @@ -4540,10 +4712,19 @@ ] }, "serving.AiGatewayRateLimitKey": { - "type": "string" + "type": "string", + "description": "Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.", + "enum": [ + "user", + "endpoint" + ] }, "serving.AiGatewayRateLimitRenewalPeriod": { - "type": "string" + "type": "string", + "description": "Renewal period field for a rate limit. Currently, only 'minute' is supported.", + "enum": [ + "minute" + ] }, "serving.AiGatewayUsageTrackingConfig": { "anyOf": [ @@ -4590,13 +4771,7 @@ }, "bedrock_provider": { "description": "The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedrockProvider", - "enum": [ - "anthropic", - "cohere", - "ai21labs", - "amazon" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedrockProvider" } }, "additionalProperties": false, @@ -4612,7 +4787,14 @@ ] }, "serving.AmazonBedrockConfigBedrockProvider": { - "type": "string" + "type": "string", + "description": "The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.", + "enum": [ + "anthropic", + "cohere", + "ai21labs", + "amazon" + ] }, "serving.AnthropicConfig": { "anyOf": [ @@ -4819,17 +5001,7 @@ }, "provider": { "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.\",\n", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ExternalModelProvider", - "enum": [ - "ai21labs", - "anthropic", - "amazon-bedrock", - "cohere", - "databricks-model-serving", - "google-cloud-vertex-ai", - "openai", - "palm" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ExternalModelProvider" }, "task": { "description": "The task type of the external model.", @@ -4850,7 +5022,18 @@ ] }, "serving.ExternalModelProvider": { - "type": "string" + "type": "string", + "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.\",\n", + "enum": [ + "ai21labs", + "anthropic", + "amazon-bedrock", + "cohere", + "databricks-model-serving", + "google-cloud-vertex-ai", + "openai", + "palm" + ] }, "serving.GoogleCloudVertexAiConfig": { "anyOf": [ @@ -4956,18 +5139,11 @@ }, "key": { "description": "Key field for a serving endpoint rate limit. 
Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitKey", - "enum": [ - "user", - "endpoint" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitKey" }, "renewal_period": { "description": "Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitRenewalPeriod", - "enum": [ - "minute" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitRenewalPeriod" } }, "additionalProperties": false, @@ -4983,10 +5159,19 @@ ] }, "serving.RateLimitKey": { - "type": "string" + "type": "string", + "description": "Key field for a serving endpoint rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.", + "enum": [ + "user", + "endpoint" + ] }, "serving.RateLimitRenewalPeriod": { - "type": "string" + "type": "string", + "description": "Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported.", + "enum": [ + "minute" + ] }, "serving.Route": { "anyOf": [ @@ -5111,23 +5296,11 @@ }, "workload_size": { "description": "The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.\n", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadSize", - "enum": [ - "Small", - "Medium", - "Large" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadSize" }, "workload_type": { "description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.\nSee the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).\n", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadType", - "enum": [ - "CPU", - "GPU_SMALL", - "GPU_MEDIUM", - "GPU_LARGE", - "MULTIGPU_MEDIUM" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadType" } }, "additionalProperties": false, @@ -5144,10 +5317,24 @@ ] }, "serving.ServedModelInputWorkloadSize": { - "type": "string" + "type": "string", + "description": "The workload size of the served model. 
The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.\n", + "enum": [ + "Small", + "Medium", + "Large" + ] }, "serving.ServedModelInputWorkloadType": { - "type": "string" + "type": "string", + "description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.\nSee the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).\n", + "enum": [ + "CPU", + "GPU_SMALL", + "GPU_MEDIUM", + "GPU_LARGE", + "MULTIGPU_MEDIUM" + ] }, "serving.TrafficConfig": { "anyOf": [ @@ -5246,6 +5433,20 @@ } ] }, + "resources.Dashboard": { + "anyOf": [ + { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Dashboard" + } + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "resources.Job": { "anyOf": [ { diff --git a/cmd/account/service-principal-secrets/service-principal-secrets.go b/cmd/account/service-principal-secrets/service-principal-secrets.go index 47cfa4b0..f7dc4e88 100755 --- a/cmd/account/service-principal-secrets/service-principal-secrets.go +++ b/cmd/account/service-principal-secrets/service-principal-secrets.go @@ -191,6 +191,8 @@ func newList() *cobra.Command { // TODO: short flags + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `An opaque page token which was the next_page_token in the response of the previous request to list the secrets for this service principal.`) + cmd.Use = "list SERVICE_PRINCIPAL_ID" cmd.Short = `List service principal secrets.` cmd.Long = `List service principal secrets. 
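Several of the schema entries above (`resources.Dashboard`, `pipelines.ReportSpec`, `pipelines.RestartWindow`) accept either an inline object or a string matching the variable-reference pattern. A small self-contained Go check of that exact pattern, with illustrative inputs, shows which references it admits:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The exact "anyOf" string pattern from the schema definitions above.
	pat := regexp.MustCompile(`\$\{(var(\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\[[0-9]+\])*)+)\}`)

	// Illustrative inputs; only references rooted at `var` are accepted.
	for _, s := range []string{
		"${var.warehouse_id}", // simple variable reference
		"${var.tags[0]}",      // indexed element
		"${workspace.host}",   // different root: rejected
	} {
		fmt.Printf("%-22s %v\n", s, pat.MatchString(s))
	}
}
```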
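The list command above gains a `--page-token` flag, which follows the usual opaque-token pagination contract: feed the `next_page_token` from one response into the next request until it comes back empty. A minimal sketch of the calling loop, using a hypothetical in-memory `listPage` stub instead of the real API:

```go
package main

import "fmt"

// page is a hypothetical stand-in for one response of a paginated list API.
type page struct {
	Items         []string
	NextPageToken string
}

// listPage fakes a backend that returns results one page at a time; an
// empty NextPageToken marks the final page.
func listPage(token string) page {
	switch token {
	case "":
		return page{Items: []string{"secret-1", "secret-2"}, NextPageToken: "t1"}
	case "t1":
		return page{Items: []string{"secret-3"}}
	default:
		return page{}
	}
}

func main() {
	var token string
	for {
		p := listPage(token)
		for _, item := range p.Items {
			fmt.Println(item)
		}
		if p.NextPageToken == "" {
			break
		}
		token = p.NextPageToken
	}
}
```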
diff --git a/cmd/account/workspaces/workspaces.go b/cmd/account/workspaces/workspaces.go index 82d3d7db..bee01eb4 100755 --- a/cmd/account/workspaces/workspaces.go +++ b/cmd/account/workspaces/workspaces.go @@ -81,6 +81,7 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.DeploymentName, "deployment-name", createReq.DeploymentName, `The deployment name defines part of the subdomain for the workspace.`) // TODO: complex arg: gcp_managed_network_config // TODO: complex arg: gke_config + cmd.Flags().BoolVar(&createReq.IsNoPublicIpEnabled, "is-no-public-ip-enabled", createReq.IsNoPublicIpEnabled, `Whether no public IP is enabled for the workspace.`) cmd.Flags().StringVar(&createReq.Location, "location", createReq.Location, `The Google Cloud region of the workspace data plane in your Google account.`) cmd.Flags().StringVar(&createReq.ManagedServicesCustomerManagedKeyId, "managed-services-customer-managed-key-id", createReq.ManagedServicesCustomerManagedKeyId, `The ID of the workspace's managed services encryption key configuration object.`) cmd.Flags().StringVar(&createReq.NetworkId, "network-id", createReq.NetworkId, ``) @@ -420,6 +421,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.ManagedServicesCustomerManagedKeyId, "managed-services-customer-managed-key-id", updateReq.ManagedServicesCustomerManagedKeyId, `The ID of the workspace's managed services encryption key configuration object.`) cmd.Flags().StringVar(&updateReq.NetworkConnectivityConfigId, "network-connectivity-config-id", updateReq.NetworkConnectivityConfigId, ``) cmd.Flags().StringVar(&updateReq.NetworkId, "network-id", updateReq.NetworkId, `The ID of the workspace's network configuration object.`) + cmd.Flags().StringVar(&updateReq.PrivateAccessSettingsId, "private-access-settings-id", updateReq.PrivateAccessSettingsId, `The ID of the workspace's private access settings configuration object.`) cmd.Flags().StringVar(&updateReq.StorageConfigurationId, "storage-configuration-id", updateReq.StorageConfigurationId, `The ID of the workspace's storage configuration object.`) cmd.Flags().StringVar(&updateReq.StorageCustomerManagedKeyId, "storage-customer-managed-key-id", updateReq.StorageCustomerManagedKeyId, `The ID of the key configuration object for workspace storage.`) diff --git a/cmd/bundle/bundle.go b/cmd/bundle/bundle.go index 0880c9c4..fb88cd7d 100644 --- a/cmd/bundle/bundle.go +++ b/cmd/bundle/bundle.go @@ -27,5 +27,6 @@ func New() *cobra.Command { cmd.AddCommand(newGenerateCommand()) cmd.AddCommand(newDebugCommand()) cmd.AddCommand(deployment.NewDeploymentCommand()) + cmd.AddCommand(newOpenCommand()) return cmd } diff --git a/cmd/bundle/destroy.go b/cmd/bundle/destroy.go index cd7e6306..711abbcd 100644 --- a/cmd/bundle/destroy.go +++ b/cmd/bundle/destroy.go @@ -6,6 +6,7 @@ import ( "os" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/cmd/root" @@ -62,7 +63,12 @@ func newDestroyCommand() *cobra.Command { diags = bundle.Apply(ctx, b, bundle.Seq( phases.Initialize(), - phases.Build(), + // We need to resolve artifact variables (as we do in the build phase) + // because some of the to-be-destroyed resources might use these variables. 
+ // Not resolving might lead to terraform "Reference to undeclared resource" error + mutator.ResolveVariableReferences( + "artifacts", + ), phases.Destroy(), )) if err := diags.Error(); err != nil { diff --git a/cmd/bundle/generate.go b/cmd/bundle/generate.go index 1e3d56e4..7dea19ff 100644 --- a/cmd/bundle/generate.go +++ b/cmd/bundle/generate.go @@ -16,6 +16,7 @@ func newGenerateCommand() *cobra.Command { cmd.AddCommand(generate.NewGenerateJobCommand()) cmd.AddCommand(generate.NewGeneratePipelineCommand()) + cmd.AddCommand(generate.NewGenerateDashboardCommand()) cmd.PersistentFlags().StringVar(&key, "key", "", `resource key to use for the generated configuration`) return cmd } diff --git a/cmd/bundle/generate/dashboard.go b/cmd/bundle/generate/dashboard.go new file mode 100644 index 00000000..4a538a29 --- /dev/null +++ b/cmd/bundle/generate/dashboard.go @@ -0,0 +1,467 @@ +package generate + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "path" + "path/filepath" + "strings" + "time" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/generate" + "github.com/databricks/cli/bundle/deploy/terraform" + "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/bundle/render" + "github.com/databricks/cli/bundle/resources" + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/yamlsaver" + "github.com/databricks/cli/libs/textutil" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/dashboards" + "github.com/databricks/databricks-sdk-go/service/workspace" + "github.com/spf13/cobra" + "golang.org/x/exp/maps" + "gopkg.in/yaml.v3" +) + +type dashboard struct { + // Lookup flags for one-time generate. + existingPath string + existingID string + + // Lookup flag for existing bundle resource. + resource string + + // Where to write the configuration and dashboard representation. + resourceDir string + dashboardDir string + + // Force overwrite of existing files. + force bool + + // Watch for changes to the dashboard. + watch bool + + // Relative path from the resource directory to the dashboard directory. + relativeDashboardDir string +} + +func (d *dashboard) resolveID(ctx context.Context, b *bundle.Bundle) (string, diag.Diagnostics) { + switch { + case d.existingPath != "": + return d.resolveFromPath(ctx, b) + case d.existingID != "": + return d.resolveFromID(ctx, b) + } + + return "", diag.Errorf("expected one of --dashboard-path, --dashboard-id") +} + +func (d *dashboard) resolveFromPath(ctx context.Context, b *bundle.Bundle) (string, diag.Diagnostics) { + w := b.WorkspaceClient() + obj, err := w.Workspace.GetStatusByPath(ctx, d.existingPath) + if err != nil { + if apierr.IsMissing(err) { + return "", diag.Errorf("dashboard %q not found", path.Base(d.existingPath)) + } + + // Emit a more descriptive error message for legacy dashboards. 
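The branch that follows distinguishes legacy dashboards by combining a sentinel check with a message prefix. The same two-step classification can be exercised in isolation; `errBadRequest` and `apiError` below are hypothetical stand-ins for `apierr.ErrBadRequest` and the SDK's error type:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// errBadRequest stands in for apierr.ErrBadRequest.
var errBadRequest = errors.New("bad request")

// apiError mimics an API error that classifies as a 400 via errors.Is
// and carries the backend message verbatim.
type apiError struct{ msg string }

func (e *apiError) Error() string        { return e.msg }
func (e *apiError) Is(target error) bool { return target == errBadRequest }

// isLegacyDashboardErr applies the same test as the command: the error must
// be a 400 and its message must start with the "dbsqlDashboard " prefix the
// backend uses for legacy dashboards.
func isLegacyDashboardErr(err error) bool {
	return errors.Is(err, errBadRequest) && strings.HasPrefix(err.Error(), "dbsqlDashboard ")
}

func main() {
	err := &apiError{msg: "dbsqlDashboard is not user-facing."}
	fmt.Println(isLegacyDashboardErr(err)) // true
}
```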
+ if errors.Is(err, apierr.ErrBadRequest) && strings.HasPrefix(err.Error(), "dbsqlDashboard ") { + return "", diag.Diagnostics{ + { + Severity: diag.Error, + Summary: fmt.Sprintf("dashboard %q is a legacy dashboard", path.Base(d.existingPath)), + Detail: "" + + "Databricks Asset Bundles work exclusively with AI/BI dashboards.\n" + + "\n" + + "Instructions on how to convert a legacy dashboard to an AI/BI dashboard\n" + + "can be found at: https://docs.databricks.com/en/dashboards/clone-legacy-to-aibi.html.", + }, + } + } + + return "", diag.FromErr(err) + } + + if obj.ObjectType != workspace.ObjectTypeDashboard { + found := strings.ToLower(obj.ObjectType.String()) + return "", diag.Diagnostics{ + { + Severity: diag.Error, + Summary: fmt.Sprintf("expected a dashboard, found a %s", found), + }, + } + } + + if obj.ResourceId == "" { + return "", diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "expected a non-empty dashboard resource ID", + }, + } + } + + return obj.ResourceId, nil +} + +func (d *dashboard) resolveFromID(ctx context.Context, b *bundle.Bundle) (string, diag.Diagnostics) { + w := b.WorkspaceClient() + obj, err := w.Lakeview.GetByDashboardId(ctx, d.existingID) + if err != nil { + if apierr.IsMissing(err) { + return "", diag.Errorf("dashboard with ID %s not found", d.existingID) + } + return "", diag.FromErr(err) + } + + return obj.DashboardId, nil +} + +func remarshalJSON(data []byte) ([]byte, error) { + var tmp any + var err error + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // Remarshal the data to ensure its formatting is stable. + // The result will have alphabetically sorted keys and be indented. + // HTML escaping is disabled to retain characters such as &, <, and >. + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + enc.SetIndent("", " ") + enc.SetEscapeHTML(false) + err = enc.Encode(tmp) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (d *dashboard) saveSerializedDashboard(_ context.Context, b *bundle.Bundle, dashboard *dashboards.Dashboard, filename string) error { + // Unmarshal and remarshal the serialized dashboard to ensure it is formatted correctly. + // The result will have alphabetically sorted keys and be indented. + data, err := remarshalJSON([]byte(dashboard.SerializedDashboard)) + if err != nil { + return err + } + + // Make sure the output directory exists. + if err := os.MkdirAll(filepath.Dir(filename), 0755); err != nil { + return err + } + + // Clean the filename to ensure it is a valid path (and can be used on this OS). + filename = filepath.Clean(filename) + + // Attempt to make the path relative to the bundle root. + rel, err := filepath.Rel(b.BundleRootPath, filename) + if err != nil { + rel = filename + } + + // Verify that the file does not already exist. + info, err := os.Stat(filename) + if err == nil { + if info.IsDir() { + return fmt.Errorf("%s is a directory", rel) + } + if !d.force { + return fmt.Errorf("%s already exists. Use --force to overwrite", rel) + } + } + + fmt.Printf("Writing dashboard to %q\n", rel) + return os.WriteFile(filename, data, 0644) +} + +func (d *dashboard) saveConfiguration(ctx context.Context, b *bundle.Bundle, dashboard *dashboards.Dashboard, key string) error { + // Save serialized dashboard definition to the dashboard directory. 
+ dashboardBasename := fmt.Sprintf("%s.lvdash.json", key) + dashboardPath := filepath.Join(d.dashboardDir, dashboardBasename) + err := d.saveSerializedDashboard(ctx, b, dashboard, dashboardPath) + if err != nil { + return err + } + + // Synthesize resource configuration. + v, err := generate.ConvertDashboardToValue(dashboard, path.Join(d.relativeDashboardDir, dashboardBasename)) + if err != nil { + return err + } + + result := map[string]dyn.Value{ + "resources": dyn.V(map[string]dyn.Value{ + "dashboards": dyn.V(map[string]dyn.Value{ + key: v, + }), + }), + } + + // Make sure the output directory exists. + if err := os.MkdirAll(d.resourceDir, 0755); err != nil { + return err + } + + // Save the configuration to the resource directory. + resourcePath := filepath.Join(d.resourceDir, fmt.Sprintf("%s.dashboard.yml", key)) + saver := yamlsaver.NewSaverWithStyle(map[string]yaml.Style{ + "display_name": yaml.DoubleQuotedStyle, + }) + + // Attempt to make the path relative to the bundle root. + rel, err := filepath.Rel(b.BundleRootPath, resourcePath) + if err != nil { + rel = resourcePath + } + + fmt.Printf("Writing configuration to %q\n", rel) + err = saver.SaveAsYAML(result, resourcePath, d.force) + if err != nil { + return err + } + + return nil +} + +func waitForChanges(ctx context.Context, w *databricks.WorkspaceClient, dashboard *dashboards.Dashboard) diag.Diagnostics { + // Compute [time.Time] for the most recent update. + tref, err := time.Parse(time.RFC3339, dashboard.UpdateTime) + if err != nil { + return diag.FromErr(err) + } + + for { + obj, err := w.Workspace.GetStatusByPath(ctx, dashboard.Path) + if err != nil { + return diag.FromErr(err) + } + + // Compute [time.Time] from timestamp in millis since epoch. + tcur := time.Unix(0, obj.ModifiedAt*int64(time.Millisecond)) + if tcur.After(tref) { + break + } + + time.Sleep(1 * time.Second) + } + + return nil +} + +func (d *dashboard) updateDashboardForResource(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + resource, ok := b.Config.Resources.Dashboards[d.resource] + if !ok { + return diag.Errorf("dashboard resource %q is not defined", d.resource) + } + + if resource.FilePath == "" { + return diag.Errorf("dashboard resource %q has no file path defined", d.resource) + } + + // Resolve the dashboard ID from the resource. + dashboardID := resource.ID + + // Overwrite the dashboard at the path referenced from the resource. + dashboardPath := resource.FilePath + + w := b.WorkspaceClient() + + // Start polling the underlying dashboard for changes. + var etag string + for { + dashboard, err := w.Lakeview.GetByDashboardId(ctx, dashboardID) + if err != nil { + return diag.FromErr(err) + } + + if etag != dashboard.Etag { + err = d.saveSerializedDashboard(ctx, b, dashboard, dashboardPath) + if err != nil { + return diag.FromErr(err) + } + } + + // Abort if we are not watching for changes. + if !d.watch { + return nil + } + + // Update the etag for the next iteration. + etag = dashboard.Etag + + // Now poll the workspace API for changes. + // This is much more efficient than polling the dashboard API because it + // includes the entire serialized dashboard whereas we're only interested + // in the last modified time of the dashboard here. 
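The `waitForChanges` poller above compares two differently encoded timestamps: the dashboard's `update_time` arrives as RFC3339 text, while the workspace object's `ModifiedAt` is milliseconds since the Unix epoch. The conversion on each side, shown with an illustrative millisecond value:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Reference time parsed from an RFC3339 update_time string.
	tref, err := time.Parse(time.RFC3339, "2024-10-01T12:00:00Z")
	if err != nil {
		panic(err)
	}

	// Hypothetical ModifiedAt value: milliseconds since the epoch.
	var modifiedAt int64 = 1727784001000

	// Same conversion as the polling loop: millis -> time.Time.
	tcur := time.Unix(0, modifiedAt*int64(time.Millisecond))

	fmt.Println(tcur.UTC())       // 2024-10-01 12:00:01 +0000 UTC
	fmt.Println(tcur.After(tref)) // true: a change was observed
}
```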
+ waitForChanges(ctx, w, dashboard) + } +} + +func (d *dashboard) generateForExisting(ctx context.Context, b *bundle.Bundle, dashboardID string) diag.Diagnostics { + w := b.WorkspaceClient() + dashboard, err := w.Lakeview.GetByDashboardId(ctx, dashboardID) + if err != nil { + return diag.FromErr(err) + } + + key := textutil.NormalizeString(dashboard.DisplayName) + err = d.saveConfiguration(ctx, b, dashboard, key) + if err != nil { + return diag.FromErr(err) + } + + return nil +} + +func (d *dashboard) initialize(b *bundle.Bundle) diag.Diagnostics { + // Make the paths absolute if they aren't already. + if !filepath.IsAbs(d.resourceDir) { + d.resourceDir = filepath.Join(b.BundleRootPath, d.resourceDir) + } + if !filepath.IsAbs(d.dashboardDir) { + d.dashboardDir = filepath.Join(b.BundleRootPath, d.dashboardDir) + } + + // Make sure we know how the dashboard path is relative to the resource path. + rel, err := filepath.Rel(d.resourceDir, d.dashboardDir) + if err != nil { + return diag.FromErr(err) + } + + d.relativeDashboardDir = filepath.ToSlash(rel) + return nil +} + +func (d *dashboard) runForResource(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + diags := bundle.Apply(ctx, b, bundle.Seq( + phases.Initialize(), + terraform.Interpolate(), + terraform.Write(), + terraform.StatePull(), + terraform.Load(), + )) + if diags.HasError() { + return diags + } + + return d.updateDashboardForResource(ctx, b) +} + +func (d *dashboard) runForExisting(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + // Resolve the ID of the dashboard to generate configuration for. + dashboardID, diags := d.resolveID(ctx, b) + if diags.HasError() { + return diags + } + + return d.generateForExisting(ctx, b, dashboardID) +} + +func (d *dashboard) RunE(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + b, diags := root.MustConfigureBundle(cmd) + if diags.HasError() { + return diags.Error() + } + + diags = d.initialize(b) + if diags.HasError() { + return diags.Error() + } + + if d.resource != "" { + diags = d.runForResource(ctx, b) + } else { + diags = d.runForExisting(ctx, b) + } + + renderOpts := render.RenderOptions{RenderSummaryTable: false} + err := render.RenderDiagnostics(cmd.OutOrStdout(), b, diags, renderOpts) + if err != nil { + return fmt.Errorf("failed to render output: %w", err) + } + + if diags.HasError() { + return root.ErrAlreadyPrinted + } + + return nil +} + +// filterDashboards returns a filter that only includes dashboards. +func filterDashboards(ref resources.Reference) bool { + return ref.Description.SingularName == "dashboard" +} + +// dashboardResourceCompletion executes to autocomplete the argument to the resource flag. +func dashboardResourceCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + b, diags := root.MustConfigureBundle(cmd) + if err := diags.Error(); err != nil { + cobra.CompErrorln(err.Error()) + return nil, cobra.ShellCompDirectiveError + } + + if b == nil { + return nil, cobra.ShellCompDirectiveNoFileComp + } + + return maps.Keys(resources.Completions(b, filterDashboards)), cobra.ShellCompDirectiveNoFileComp +} + +func NewGenerateDashboardCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "dashboard", + Short: "Generate configuration for a dashboard", + } + + d := &dashboard{} + + // Lookup flags. 
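The flag registration that follows relies on cobra's flag groups: `MarkFlagsOneRequired` demands at least one of the lookup flags, and `MarkFlagsMutuallyExclusive` keeps `--watch` away from the one-shot lookups. A minimal standalone sketch of the same arrangement:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	var existingPath, existingID, resource string
	var watch bool

	cmd := &cobra.Command{
		Use: "demo",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("resource:", resource, "watch:", watch)
			return nil
		},
	}

	cmd.Flags().StringVar(&existingPath, "existing-path", "", "workspace path")
	cmd.Flags().StringVar(&existingID, "existing-id", "", "dashboard ID")
	cmd.Flags().StringVar(&resource, "resource", "", "resource key")
	cmd.Flags().BoolVar(&watch, "watch", false, "watch for changes")

	// At least one lookup flag is required; --watch only combines with --resource.
	cmd.MarkFlagsOneRequired("existing-path", "existing-id", "resource")
	cmd.MarkFlagsMutuallyExclusive("watch", "existing-path")
	cmd.MarkFlagsMutuallyExclusive("watch", "existing-id")

	cmd.SetArgs([]string{"--resource", "my_dashboard", "--watch"})
	_ = cmd.Execute()
}
```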
+ cmd.Flags().StringVar(&d.existingPath, "existing-path", "", `workspace path of the dashboard to generate configuration for`) + cmd.Flags().StringVar(&d.existingID, "existing-id", "", `ID of the dashboard to generate configuration for`) + cmd.Flags().StringVar(&d.resource, "resource", "", `resource key of dashboard to watch for changes`) + + // Alias lookup flags that include the resource type name. + // Included for symmetry with the other generate commands, but we prefer the shorter flags. + cmd.Flags().StringVar(&d.existingPath, "existing-dashboard-path", "", `workspace path of the dashboard to generate configuration for`) + cmd.Flags().StringVar(&d.existingID, "existing-dashboard-id", "", `ID of the dashboard to generate configuration for`) + cmd.Flags().MarkHidden("existing-dashboard-path") + cmd.Flags().MarkHidden("existing-dashboard-id") + + // Output flags. + cmd.Flags().StringVarP(&d.resourceDir, "resource-dir", "d", "./resources", `directory to write the configuration to`) + cmd.Flags().StringVarP(&d.dashboardDir, "dashboard-dir", "s", "./src", `directory to write the dashboard representation to`) + cmd.Flags().BoolVarP(&d.force, "force", "f", false, `force overwrite existing files in the output directory`) + + // Exactly one of the lookup flags must be provided. + cmd.MarkFlagsOneRequired( + "existing-path", + "existing-id", + "resource", + ) + + // Watch flag. This is relevant only in combination with the resource flag. + cmd.Flags().BoolVar(&d.watch, "watch", false, `watch for changes to the dashboard and update the configuration`) + + // Make sure the watch flag is only used with the existing-resource flag. + cmd.MarkFlagsMutuallyExclusive("watch", "existing-path") + cmd.MarkFlagsMutuallyExclusive("watch", "existing-id") + + // Completion for the resource flag. + cmd.RegisterFlagCompletionFunc("resource", dashboardResourceCompletion) + + cmd.RunE = d.RunE + return cmd +} diff --git a/cmd/bundle/generate/dashboard_test.go b/cmd/bundle/generate/dashboard_test.go new file mode 100644 index 00000000..6741e6a3 --- /dev/null +++ b/cmd/bundle/generate/dashboard_test.go @@ -0,0 +1,182 @@ +package generate + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/dashboards" + "github.com/databricks/databricks-sdk-go/service/workspace" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestDashboard_ErrorOnLegacyDashboard(t *testing.T) { + // Response to a GetStatus request on a path pointing to a legacy dashboard. + // + // < HTTP/2.0 400 Bad Request + // < { + // < "error_code": "BAD_REQUEST", + // < "message": "dbsqlDashboard is not user-facing." 
+ // < } + + d := dashboard{ + existingPath: "/path/to/legacy dashboard", + } + + m := mocks.NewMockWorkspaceClient(t) + w := m.GetMockWorkspaceAPI() + w.On("GetStatusByPath", mock.Anything, "/path/to/legacy dashboard").Return(nil, &apierr.APIError{ + StatusCode: 400, + ErrorCode: "BAD_REQUEST", + Message: "dbsqlDashboard is not user-facing.", + }) + + ctx := context.Background() + b := &bundle.Bundle{} + b.SetWorkpaceClient(m.WorkspaceClient) + + _, diags := d.resolveID(ctx, b) + require.Len(t, diags, 1) + assert.Equal(t, diags[0].Summary, "dashboard \"legacy dashboard\" is a legacy dashboard") +} + +func TestDashboard_ExistingID_Nominal(t *testing.T) { + root := t.TempDir() + b := &bundle.Bundle{ + BundleRootPath: root, + } + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + + dashboardsAPI := m.GetMockLakeviewAPI() + dashboardsAPI.EXPECT().GetByDashboardId(mock.Anything, "f00dcafe").Return(&dashboards.Dashboard{ + DashboardId: "f00dcafe", + DisplayName: "This is a test dashboard", + SerializedDashboard: `{"pages":[{"displayName":"New Page","layout":[],"name":"12345678"}]}`, + WarehouseId: "w4r3h0us3", + }, nil) + + ctx := bundle.Context(context.Background(), b) + cmd := NewGenerateDashboardCommand() + cmd.SetContext(ctx) + cmd.Flag("existing-id").Value.Set("f00dcafe") + + err := cmd.RunE(cmd, []string{}) + require.NoError(t, err) + + // Assert the contents of the generated configuration + data, err := os.ReadFile(filepath.Join(root, "resources", "this_is_a_test_dashboard.dashboard.yml")) + require.NoError(t, err) + assert.Equal(t, `resources: + dashboards: + this_is_a_test_dashboard: + display_name: "This is a test dashboard" + warehouse_id: w4r3h0us3 + file_path: ../src/this_is_a_test_dashboard.lvdash.json +`, string(data)) + + data, err = os.ReadFile(filepath.Join(root, "src", "this_is_a_test_dashboard.lvdash.json")) + require.NoError(t, err) + assert.JSONEq(t, `{"pages":[{"displayName":"New Page","layout":[],"name":"12345678"}]}`, string(data)) +} + +func TestDashboard_ExistingID_NotFound(t *testing.T) { + root := t.TempDir() + b := &bundle.Bundle{ + BundleRootPath: root, + } + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + + dashboardsAPI := m.GetMockLakeviewAPI() + dashboardsAPI.EXPECT().GetByDashboardId(mock.Anything, "f00dcafe").Return(nil, &apierr.APIError{ + StatusCode: 404, + }) + + ctx := bundle.Context(context.Background(), b) + cmd := NewGenerateDashboardCommand() + cmd.SetContext(ctx) + cmd.Flag("existing-id").Value.Set("f00dcafe") + + err := cmd.RunE(cmd, []string{}) + require.Error(t, err) +} + +func TestDashboard_ExistingPath_Nominal(t *testing.T) { + root := t.TempDir() + b := &bundle.Bundle{ + BundleRootPath: root, + } + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + + workspaceAPI := m.GetMockWorkspaceAPI() + workspaceAPI.EXPECT().GetStatusByPath(mock.Anything, "/path/to/dashboard").Return(&workspace.ObjectInfo{ + ObjectType: workspace.ObjectTypeDashboard, + ResourceId: "f00dcafe", + }, nil) + + dashboardsAPI := m.GetMockLakeviewAPI() + dashboardsAPI.EXPECT().GetByDashboardId(mock.Anything, "f00dcafe").Return(&dashboards.Dashboard{ + DashboardId: "f00dcafe", + DisplayName: "This is a test dashboard", + SerializedDashboard: `{"pages":[{"displayName":"New Page","layout":[],"name":"12345678"}]}`, + WarehouseId: "w4r3h0us3", + }, nil) + + ctx := bundle.Context(context.Background(), b) + cmd := NewGenerateDashboardCommand() + cmd.SetContext(ctx) + 
cmd.Flag("existing-path").Value.Set("/path/to/dashboard") + + err := cmd.RunE(cmd, []string{}) + require.NoError(t, err) + + // Assert the contents of the generated configuration + data, err := os.ReadFile(filepath.Join(root, "resources", "this_is_a_test_dashboard.dashboard.yml")) + require.NoError(t, err) + assert.Equal(t, `resources: + dashboards: + this_is_a_test_dashboard: + display_name: "This is a test dashboard" + warehouse_id: w4r3h0us3 + file_path: ../src/this_is_a_test_dashboard.lvdash.json +`, string(data)) + + data, err = os.ReadFile(filepath.Join(root, "src", "this_is_a_test_dashboard.lvdash.json")) + require.NoError(t, err) + assert.JSONEq(t, `{"pages":[{"displayName":"New Page","layout":[],"name":"12345678"}]}`, string(data)) +} + +func TestDashboard_ExistingPath_NotFound(t *testing.T) { + root := t.TempDir() + b := &bundle.Bundle{ + BundleRootPath: root, + } + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + + workspaceAPI := m.GetMockWorkspaceAPI() + workspaceAPI.EXPECT().GetStatusByPath(mock.Anything, "/path/to/dashboard").Return(nil, &apierr.APIError{ + StatusCode: 404, + }) + + ctx := bundle.Context(context.Background(), b) + cmd := NewGenerateDashboardCommand() + cmd.SetContext(ctx) + cmd.Flag("existing-path").Value.Set("/path/to/dashboard") + + err := cmd.RunE(cmd, []string{}) + require.Error(t, err) +} diff --git a/cmd/bundle/open.go b/cmd/bundle/open.go new file mode 100644 index 00000000..a2ad32fd --- /dev/null +++ b/cmd/bundle/open.go @@ -0,0 +1,144 @@ +package bundle + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/deploy/terraform" + "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/bundle/resources" + "github.com/databricks/cli/cmd/bundle/utils" + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/spf13/cobra" + "golang.org/x/exp/maps" + + "github.com/pkg/browser" +) + +func promptOpenArgument(ctx context.Context, b *bundle.Bundle) (string, error) { + // Compute map of "Human readable name of resource" -> "resource key". + inv := make(map[string]string) + for k, ref := range resources.Completions(b) { + title := fmt.Sprintf("%s: %s", ref.Description.SingularTitle, ref.Resource.GetName()) + inv[title] = k + } + + key, err := cmdio.Select(ctx, inv, "Resource to open") + if err != nil { + return "", err + } + + return key, nil +} + +func resolveOpenArgument(ctx context.Context, b *bundle.Bundle, args []string) (string, error) { + // If no arguments are specified, prompt the user to select the resource to open. 
+ if len(args) == 0 && cmdio.IsPromptSupported(ctx) { + return promptOpenArgument(ctx, b) + } + + if len(args) < 1 { + return "", fmt.Errorf("expected a KEY of the resource to open") + } + + return args[0], nil +} + +func newOpenCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "open", + Short: "Open a resource in the browser", + Args: root.MaximumNArgs(1), + } + + var forcePull bool + cmd.Flags().BoolVar(&forcePull, "force-pull", false, "Skip local cache and load the state from the remote workspace") + + cmd.RunE = func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + b, diags := utils.ConfigureBundleWithVariables(cmd) + if err := diags.Error(); err != nil { + return diags.Error() + } + + diags = bundle.Apply(ctx, b, phases.Initialize()) + if err := diags.Error(); err != nil { + return err + } + + arg, err := resolveOpenArgument(ctx, b, args) + if err != nil { + return err + } + + cacheDir, err := terraform.Dir(ctx, b) + if err != nil { + return err + } + _, stateFileErr := os.Stat(filepath.Join(cacheDir, terraform.TerraformStateFileName)) + _, configFileErr := os.Stat(filepath.Join(cacheDir, terraform.TerraformConfigFileName)) + noCache := errors.Is(stateFileErr, os.ErrNotExist) || errors.Is(configFileErr, os.ErrNotExist) + + if forcePull || noCache { + diags = bundle.Apply(ctx, b, bundle.Seq( + terraform.StatePull(), + terraform.Interpolate(), + terraform.Write(), + )) + if err := diags.Error(); err != nil { + return err + } + } + + diags = bundle.Apply(ctx, b, bundle.Seq( + terraform.Load(), + mutator.InitializeURLs(), + )) + if err := diags.Error(); err != nil { + return err + } + + // Locate resource to open. + ref, err := resources.Lookup(b, arg) + if err != nil { + return err + } + + // Confirm that the resource has a URL. + url := ref.Resource.GetURL() + if url == "" { + return fmt.Errorf("resource does not have a URL associated with it (has it been deployed?)") + } + + return browser.OpenURL(url) + } + + cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + b, diags := root.MustConfigureBundle(cmd) + if err := diags.Error(); err != nil { + cobra.CompErrorln(err.Error()) + return nil, cobra.ShellCompDirectiveError + } + + // No completion in the context of a bundle. + // Source and destination paths are taken from bundle configuration. + if b == nil { + return nil, cobra.ShellCompDirectiveNoFileComp + } + + if len(args) == 0 { + completions := resources.Completions(b) + return maps.Keys(completions), cobra.ShellCompDirectiveNoFileComp + } else { + return nil, cobra.ShellCompDirectiveNoFileComp + } + } + + return cmd +} diff --git a/cmd/bundle/run.go b/cmd/bundle/run.go index 9ef5eb8f..7a92766d 100644 --- a/cmd/bundle/run.go +++ b/cmd/bundle/run.go @@ -1,20 +1,75 @@ package bundle import ( + "context" "encoding/json" "fmt" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/deploy/terraform" "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/bundle/resources" "github.com/databricks/cli/bundle/run" + "github.com/databricks/cli/bundle/run/output" "github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" "github.com/spf13/cobra" + "golang.org/x/exp/maps" ) +func promptRunArgument(ctx context.Context, b *bundle.Bundle) (string, error) { + // Compute map of "Human readable name of resource" -> "resource key". 
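The prompt assembled below inverts the completion map: keys become values, so the user picks a human-readable "Type: Name" title and the code recovers the bundle key. A tiny sketch over hypothetical completion data:

```go
package main

import "fmt"

// ref carries the minimal fields needed to build a prompt title.
type ref struct {
	SingularTitle string
	Name          string
}

func main() {
	// Hypothetical completions: bundle key -> resource reference.
	completions := map[string]ref{
		"nightly_etl": {SingularTitle: "Job", Name: "Nightly ETL"},
		"ingest":      {SingularTitle: "Pipeline", Name: "Ingest"},
	}

	// Invert into "Type: Name" -> key, the map handed to cmdio.Select.
	inv := make(map[string]string)
	for key, r := range completions {
		inv[fmt.Sprintf("%s: %s", r.SingularTitle, r.Name)] = key
	}

	fmt.Println(inv["Job: Nightly ETL"]) // nightly_etl
}
```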
+ inv := make(map[string]string) + for k, ref := range resources.Completions(b, run.IsRunnable) { + title := fmt.Sprintf("%s: %s", ref.Description.SingularTitle, ref.Resource.GetName()) + inv[title] = k + } + + key, err := cmdio.Select(ctx, inv, "Resource to run") + if err != nil { + return "", err + } + + return key, nil +} + +// resolveRunArgument resolves the resource key to run. +// It returns the remaining arguments to pass to the runner, if applicable. +func resolveRunArgument(ctx context.Context, b *bundle.Bundle, args []string) (string, []string, error) { + // If no arguments are specified, prompt the user to select something to run. + if len(args) == 0 && cmdio.IsPromptSupported(ctx) { + key, err := promptRunArgument(ctx, b) + if err != nil { + return "", nil, err + } + return key, args, nil + } + + if len(args) < 1 { + return "", nil, fmt.Errorf("expected a KEY of the resource to run") + } + + return args[0], args[1:], nil +} + +func keyToRunner(b *bundle.Bundle, arg string) (run.Runner, error) { + // Locate the resource to run. + ref, err := resources.Lookup(b, arg, run.IsRunnable) + if err != nil { + return nil, err + } + + // Convert the resource to a runnable resource. + runner, err := run.ToRunner(b, ref) + if err != nil { + return nil, err + } + + return runner, nil +} + func newRunCommand() *cobra.Command { cmd := &cobra.Command{ Use: "run [flags] KEY", @@ -60,22 +115,9 @@ task or a Python wheel task, the second example applies. return err } - // If no arguments are specified, prompt the user to select something to run. - if len(args) == 0 && cmdio.IsPromptSupported(ctx) { - // Invert completions from KEY -> NAME, to NAME -> KEY. - inv := make(map[string]string) - for k, v := range run.ResourceCompletionMap(b) { - inv[v] = k - } - id, err := cmdio.Select(ctx, inv, "Resource to run") - if err != nil { - return err - } - args = append(args, id) - } - - if len(args) < 1 { - return fmt.Errorf("expected a KEY of the resource to run") + key, args, err := resolveRunArgument(ctx, b, args) + if err != nil { + return err } diags = bundle.Apply(ctx, b, bundle.Seq( @@ -88,31 +130,28 @@ task or a Python wheel task, the second example applies. return err } - runner, err := run.Find(b, args[0]) + runner, err := keyToRunner(b, key) if err != nil { return err } // Parse additional positional arguments. - err = runner.ParseArgs(args[1:], &runOptions) + err = runner.ParseArgs(args, &runOptions) if err != nil { return err } runOptions.NoWait = noWait + var output output.RunOutput if restart { - s := cmdio.Spinner(ctx) - s <- "Cancelling all runs" - err := runner.Cancel(ctx) - close(s) - if err != nil { - return err - } + output, err = runner.Restart(ctx, &runOptions) + } else { + output, err = runner.Run(ctx, &runOptions) } - output, err := runner.Run(ctx, &runOptions) if err != nil { return err } + if output != nil { switch root.OutputType(cmd) { case flags.OutputText: @@ -148,10 +187,11 @@ task or a Python wheel task, the second example applies. } if len(args) == 0 { - return run.ResourceCompletions(b), cobra.ShellCompDirectiveNoFileComp + completions := resources.Completions(b, run.IsRunnable) + return maps.Keys(completions), cobra.ShellCompDirectiveNoFileComp } else { // If we know the resource to run, we can complete additional positional arguments. 
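Both the run command and the dashboard generator narrow the completion set with a predicate (`run.IsRunnable`, `filterDashboards`) before offering keys. A generic sketch of that filter-by-predicate pattern over a hypothetical reference map:

```go
package main

import "fmt"

// reference is a hypothetical stand-in for resources.Reference.
type reference struct{ SingularName string }

// completions keeps only entries that satisfy every supplied filter.
func completions(all map[string]reference, filters ...func(reference) bool) map[string]reference {
	out := make(map[string]reference)
	for key, ref := range all {
		keep := true
		for _, f := range filters {
			if !f(ref) {
				keep = false
				break
			}
		}
		if keep {
			out[key] = ref
		}
	}
	return out
}

func main() {
	all := map[string]reference{
		"nightly_etl": {SingularName: "job"},
		"sales":       {SingularName: "dashboard"},
	}
	onlyDashboards := func(r reference) bool { return r.SingularName == "dashboard" }
	for key := range completions(all, onlyDashboards) {
		fmt.Println(key) // sales
	}
}
```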
- runner, err := run.Find(b, args[0]) + runner, err := keyToRunner(b, args[0]) if err != nil { return nil, cobra.ShellCompDirectiveError } diff --git a/cmd/bundle/sync.go b/cmd/bundle/sync.go index df3e087c..274bba0e 100644 --- a/cmd/bundle/sync.go +++ b/cmd/bundle/sync.go @@ -1,7 +1,9 @@ package bundle import ( + "context" "fmt" + "io" "time" "github.com/databricks/cli/bundle" @@ -9,6 +11,7 @@ import ( "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/flags" "github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/sync" "github.com/spf13/cobra" @@ -18,6 +21,7 @@ type syncFlags struct { interval time.Duration full bool watch bool + output flags.Output } func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, b *bundle.Bundle) (*sync.SyncOptions, error) { @@ -26,6 +30,21 @@ func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, b *bundle.Bundle) return nil, fmt.Errorf("cannot get sync options: %w", err) } + if f.output != "" { + var outputFunc func(context.Context, <-chan sync.Event, io.Writer) + switch f.output { + case flags.OutputText: + outputFunc = sync.TextOutput + case flags.OutputJSON: + outputFunc = sync.JsonOutput + } + if outputFunc != nil { + opts.OutputHandler = func(ctx context.Context, c <-chan sync.Event) { + outputFunc(ctx, c, cmd.OutOrStdout()) + } + } + } + opts.Full = f.full opts.PollInterval = f.interval return opts, nil @@ -42,6 +61,7 @@ func newSyncCommand() *cobra.Command { cmd.Flags().DurationVar(&f.interval, "interval", 1*time.Second, "file system polling interval (for --watch)") cmd.Flags().BoolVar(&f.full, "full", false, "perform full synchronization (default is incremental)") cmd.Flags().BoolVar(&f.watch, "watch", false, "watch local file system for changes") + cmd.Flags().Var(&f.output, "output", "type of the output format") cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() @@ -65,6 +85,7 @@ func newSyncCommand() *cobra.Command { if err != nil { return err } + defer s.Close() log.Infof(ctx, "Remote file sync location: %v", opts.RemotePath) diff --git a/cmd/fs/helpers_test.go b/cmd/fs/helpers_test.go index 10b4aa16..a01035cc 100644 --- a/cmd/fs/helpers_test.go +++ b/cmd/fs/helpers_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/fakefs" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/spf13/cobra" @@ -84,7 +85,7 @@ func setupTest(t *testing.T) (*validArgs, *cobra.Command, *mocks.MockWorkspaceCl cmd, m := setupCommand(t) fakeFilerForPath := func(ctx context.Context, fullPath string) (filer.Filer, string, error) { - fakeFiler := filer.NewFakeFiler(map[string]filer.FakeFileInfo{ + fakeFiler := filer.NewFakeFiler(map[string]fakefs.FileInfo{ "dir": {FakeName: "root", FakeDir: true}, "dir/dirA": {FakeDir: true}, "dir/dirB": {FakeDir: true}, diff --git a/cmd/root/root.go b/cmd/root/root.go index eda873d1..e6f66f12 100644 --- a/cmd/root/root.go +++ b/cmd/root/root.go @@ -11,6 +11,7 @@ import ( "github.com/databricks/cli/internal/build" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/dbr" "github.com/databricks/cli/libs/log" "github.com/spf13/cobra" ) @@ -73,8 +74,12 @@ func New(ctx context.Context) *cobra.Command { // get the context back ctx = cmd.Context() + // Detect if the CLI is running on DBR and store this on the context. 
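+		// (Detection presumably keys off DBR-specific markers in the process
+		// environment; downstream code reads the cached result from the
+		// context rather than re-detecting.)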
+ ctx = dbr.DetectRuntime(ctx) + // Configure our user agent with the command that's about to be executed. ctx = withCommandInUserAgent(ctx, cmd) + ctx = withCommandExecIdInUserAgent(ctx) ctx = withUpstreamInUserAgent(ctx) cmd.SetContext(ctx) return nil diff --git a/cmd/root/user_agent_command_exec_id.go b/cmd/root/user_agent_command_exec_id.go new file mode 100644 index 00000000..3bf32b70 --- /dev/null +++ b/cmd/root/user_agent_command_exec_id.go @@ -0,0 +1,14 @@ +package root + +import ( + "context" + + "github.com/databricks/databricks-sdk-go/useragent" + "github.com/google/uuid" +) + +func withCommandExecIdInUserAgent(ctx context.Context) context.Context { + // A UUID that will allow us to correlate multiple API requests made by + // the same CLI invocation. + return useragent.InContext(ctx, "cmd-exec-id", uuid.New().String()) +} diff --git a/cmd/root/user_agent_command_exec_id_test.go b/cmd/root/user_agent_command_exec_id_test.go new file mode 100644 index 00000000..5c436510 --- /dev/null +++ b/cmd/root/user_agent_command_exec_id_test.go @@ -0,0 +1,26 @@ +package root + +import ( + "context" + "regexp" + "testing" + + "github.com/databricks/databricks-sdk-go/useragent" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestWithCommandExecIdInUserAgent(t *testing.T) { + ctx := withCommandExecIdInUserAgent(context.Background()) + + // Check that the command exec ID is in the user agent string. + ua := useragent.FromContext(ctx) + re := regexp.MustCompile(`cmd-exec-id/([a-f0-9-]+)`) + matches := re.FindAllStringSubmatch(ua, -1) + + // Assert that we have exactly one match and that it's a valid UUID. + require.Len(t, matches, 1) + _, err := uuid.Parse(matches[0][1]) + assert.NoError(t, err) +} diff --git a/cmd/root/user_agent_command_test.go b/cmd/root/user_agent_command_test.go index 9620bb5b..a3f5bbcb 100644 --- a/cmd/root/user_agent_command_test.go +++ b/cmd/root/user_agent_command_test.go @@ -1,13 +1,15 @@ package root import ( + "context" "testing" + "github.com/databricks/databricks-sdk-go/useragent" "github.com/spf13/cobra" "github.com/stretchr/testify/assert" ) -func TestCommandString(t *testing.T) { +func TestWithCommandInUserAgent(t *testing.T) { root := &cobra.Command{ Use: "root", } @@ -26,4 +28,9 @@ func TestCommandString(t *testing.T) { assert.Equal(t, "root", commandString(root)) assert.Equal(t, "hello", commandString(hello)) assert.Equal(t, "hello_world", commandString(world)) + + ctx := withCommandInUserAgent(context.Background(), world) + + ua := useragent.FromContext(ctx) + assert.Contains(t, ua, "cmd/hello_world") } diff --git a/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go b/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go new file mode 100755 index 00000000..b1adf610 --- /dev/null +++ b/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go @@ -0,0 +1,162 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package aibi_dashboard_embedding_access_policy + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. 
+// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "aibi-dashboard-embedding-access-policy", + Short: `Controls whether AI/BI published dashboard embedding is enabled, conditionally enabled, or disabled at the workspace level.`, + Long: `Controls whether AI/BI published dashboard embedding is enabled, conditionally + enabled, or disabled at the workspace level. By default, this setting is + conditionally enabled (ALLOW_APPROVED_DOMAINS).`, + } + + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetAibiDashboardEmbeddingAccessPolicySettingRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetAibiDashboardEmbeddingAccessPolicySettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Retrieve the AI/BI dashboard embedding access policy.` + cmd.Long = `Retrieve the AI/BI dashboard embedding access policy. + + Retrieves the AI/BI dashboard embedding access policy. The default setting is + ALLOW_APPROVED_DOMAINS, permitting AI/BI dashboards to be embedded on approved + domains.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.AibiDashboardEmbeddingAccessPolicy().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *settings.UpdateAibiDashboardEmbeddingAccessPolicySettingRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateAibiDashboardEmbeddingAccessPolicySettingRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the AI/BI dashboard embedding access policy.` + cmd.Long = `Update the AI/BI dashboard embedding access policy. 
+ + Updates the AI/BI dashboard embedding access policy at the workspace level.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Settings.AibiDashboardEmbeddingAccessPolicy().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service AibiDashboardEmbeddingAccessPolicy diff --git a/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go b/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go new file mode 100755 index 00000000..48119746 --- /dev/null +++ b/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go @@ -0,0 +1,162 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package aibi_dashboard_embedding_approved_domains + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "aibi-dashboard-embedding-approved-domains", + Short: `Controls the list of domains approved to host the embedded AI/BI dashboards.`, + Long: `Controls the list of domains approved to host the embedded AI/BI dashboards. + The approved domains list can't be mutated when the current access policy is + not set to ALLOW_APPROVED_DOMAINS.`, + } + + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetAibiDashboardEmbeddingApprovedDomainsSettingRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetAibiDashboardEmbeddingApprovedDomainsSettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Retrieve the list of domains approved to host embedded AI/BI dashboards.` + cmd.Long = `Retrieve the list of domains approved to host embedded AI/BI dashboards. 
+ + Retrieves the list of domains approved to host embedded AI/BI dashboards.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.AibiDashboardEmbeddingApprovedDomains().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *settings.UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the list of domains approved to host embedded AI/BI dashboards.` + cmd.Long = `Update the list of domains approved to host embedded AI/BI dashboards. + + Updates the list of domains approved to host embedded AI/BI dashboards. This + update will fail if the current workspace access policy is not + ALLOW_APPROVED_DOMAINS.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Settings.AibiDashboardEmbeddingApprovedDomains().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service AibiDashboardEmbeddingApprovedDomains diff --git a/cmd/workspace/apps/apps.go b/cmd/workspace/apps/apps.go index 4cee2f82..514da697 100755 --- a/cmd/workspace/apps/apps.go +++ b/cmd/workspace/apps/apps.go @@ -28,9 +28,6 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "apps", }, - - // This service is being previewed; hide from help output. 
- Hidden: true, } // Add methods @@ -70,6 +67,7 @@ func newCreate() *cobra.Command { cmd := &cobra.Command{} var createReq apps.CreateAppRequest + createReq.App = &apps.App{} var createJson flags.JsonFlag var createSkipWait bool @@ -80,7 +78,11 @@ func newCreate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&createReq.Description, "description", createReq.Description, `The description of the app.`) + // TODO: complex arg: active_deployment + // TODO: complex arg: app_status + // TODO: complex arg: compute_status + cmd.Flags().StringVar(&createReq.App.Description, "description", createReq.App.Description, `The description of the app.`) + // TODO: complex arg: pending_deployment // TODO: array: resources cmd.Use = "create NAME" @@ -113,7 +115,7 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - diags := createJson.Unmarshal(&createReq) + diags := createJson.Unmarshal(&createReq.App) if diags.HasError() { return diags.Error() } @@ -125,7 +127,7 @@ func newCreate() *cobra.Command { } } if !cmd.Flags().Changed("json") { - createReq.Name = args[0] + createReq.App.Name = args[0] } wait, err := w.Apps.Create(ctx, createReq) @@ -237,6 +239,7 @@ func newDeploy() *cobra.Command { cmd := &cobra.Command{} var deployReq apps.CreateAppDeploymentRequest + deployReq.AppDeployment = &apps.AppDeployment{} var deployJson flags.JsonFlag var deploySkipWait bool @@ -247,9 +250,11 @@ func newDeploy() *cobra.Command { // TODO: short flags cmd.Flags().Var(&deployJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&deployReq.DeploymentId, "deployment-id", deployReq.DeploymentId, `The unique id of the deployment.`) - cmd.Flags().Var(&deployReq.Mode, "mode", `The mode of which the deployment will manage the source code. Supported values: [AUTO_SYNC, SNAPSHOT]`) - cmd.Flags().StringVar(&deployReq.SourceCodePath, "source-code-path", deployReq.SourceCodePath, `The workspace file system path of the source code used to create the app deployment.`) + // TODO: complex arg: deployment_artifacts + cmd.Flags().StringVar(&deployReq.AppDeployment.DeploymentId, "deployment-id", deployReq.AppDeployment.DeploymentId, `The unique id of the deployment.`) + cmd.Flags().Var(&deployReq.AppDeployment.Mode, "mode", `The mode of which the deployment will manage the source code. Supported values: [AUTO_SYNC, SNAPSHOT]`) + cmd.Flags().StringVar(&deployReq.AppDeployment.SourceCodePath, "source-code-path", deployReq.AppDeployment.SourceCodePath, `The workspace file system path of the source code used to create the app deployment.`) + // TODO: complex arg: status cmd.Use = "deploy APP_NAME" cmd.Short = `Create an app deployment.` @@ -273,7 +278,7 @@ func newDeploy() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - diags := deployJson.Unmarshal(&deployReq) + diags := deployJson.Unmarshal(&deployReq.AppDeployment) if diags.HasError() { return diags.Error() } @@ -695,8 +700,9 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set app permissions.` cmd.Long = `Set app permissions. - Sets permissions on an app. Apps can inherit permissions from their root - object. + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. 
 Arguments:
   APP_NAME: The app for which to get or manage permissions.`
@@ -923,28 +929,41 @@ func newUpdate() *cobra.Command {
 	cmd := &cobra.Command{}
 
 	var updateReq apps.UpdateAppRequest
+	updateReq.App = &apps.App{}
 	var updateJson flags.JsonFlag
 
 	// TODO: short flags
 	cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
 
-	cmd.Flags().StringVar(&updateReq.Description, "description", updateReq.Description, `The description of the app.`)
+	// TODO: complex arg: active_deployment
+	// TODO: complex arg: app_status
+	// TODO: complex arg: compute_status
+	cmd.Flags().StringVar(&updateReq.App.Description, "description", updateReq.App.Description, `The description of the app.`)
+	// TODO: complex arg: pending_deployment
 	// TODO: array: resources
 
-	cmd.Use = "update NAME"
+	cmd.Use = "update NAME NAME"
 	cmd.Short = `Update an app.`
 	cmd.Long = `Update an app.
 
   Updates the app with the supplied name.
 
   Arguments:
+    NAME: The name of the app.
     NAME: The name of the app. The name must contain only lowercase alphanumeric
       characters and hyphens. It must be unique within the workspace.`
 
 	cmd.Annotations = make(map[string]string)
 
 	cmd.Args = func(cmd *cobra.Command, args []string) error {
-		check := root.ExactArgs(1)
+		if cmd.Flags().Changed("json") {
+			err := root.ExactArgs(0)(cmd, args)
+			if err != nil {
+				return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input")
+			}
+			return nil
+		}
+		check := root.ExactArgs(2)
 		return check(cmd, args)
 	}
 
@@ -954,7 +973,7 @@ func newUpdate() *cobra.Command {
 		w := root.WorkspaceClient(ctx)
 
 		if cmd.Flags().Changed("json") {
-			diags := updateJson.Unmarshal(&updateReq)
+			diags := updateJson.Unmarshal(&updateReq.App)
 			if diags.HasError() {
 				return diags.Error()
 			}
@@ -966,6 +985,9 @@ func newUpdate() *cobra.Command {
 			}
 		}
 		updateReq.Name = args[0]
+		if !cmd.Flags().Changed("json") {
+			updateReq.App.Name = args[1]
+		}
 
 		response, err := w.Apps.Update(ctx, updateReq)
 		if err != nil {
diff --git a/cmd/workspace/apps/overrides.go b/cmd/workspace/apps/overrides.go
new file mode 100644
index 00000000..debd9f5a
--- /dev/null
+++ b/cmd/workspace/apps/overrides.go
@@ -0,0 +1,59 @@
+package apps
+
+import (
+	"github.com/databricks/cli/cmd/root"
+	"github.com/databricks/cli/libs/cmdio"
+	"github.com/databricks/cli/libs/flags"
+	"github.com/databricks/databricks-sdk-go/service/apps"
+	"github.com/spf13/cobra"
+)
+
+// We override the apps.Update command because genkit currently does not
+// provide a way to mark a path field (such as name) as matching a field in
+// the request body. As a result, genkit generates a command with two
+// identical required fields: update NAME NAME.
+// This override should be removed once genkit supports this.
+func updateOverride(cmd *cobra.Command, req *apps.UpdateAppRequest) {
+	cmd.Use = "update NAME"
+	cmd.Long = `Update an app.
+
+  Updates the app with the supplied name.
+
+  Arguments:
+    NAME: The name of the app. The name must contain only lowercase alphanumeric
+      characters and hyphens.
It must be unique within the workspace.` + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + updateJson := cmd.Flag("json").Value.(*flags.JsonFlag) + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&req.App) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + + req.Name = args[0] + response, err := w.Apps.Update(ctx, *req) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } +} + +func init() { + updateOverrides = append(updateOverrides, updateOverride) +} diff --git a/cmd/workspace/clean-rooms/clean-rooms.go b/cmd/workspace/clean-rooms/clean-rooms.go deleted file mode 100755 index 72560b84..00000000 --- a/cmd/workspace/clean-rooms/clean-rooms.go +++ /dev/null @@ -1,385 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. - -package clean_rooms - -import ( - "fmt" - - "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/flags" - "github.com/databricks/databricks-sdk-go/service/sharing" - "github.com/spf13/cobra" -) - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var cmdOverrides []func(*cobra.Command) - -func New() *cobra.Command { - cmd := &cobra.Command{ - Use: "clean-rooms", - Short: `A clean room is a secure, privacy-protecting environment where two or more parties can share sensitive enterprise data, including customer data, for measurements, insights, activation and other use cases.`, - Long: `A clean room is a secure, privacy-protecting environment where two or more - parties can share sensitive enterprise data, including customer data, for - measurements, insights, activation and other use cases. - - To create clean rooms, you must be a metastore admin or a user with the - **CREATE_CLEAN_ROOM** privilege.`, - GroupID: "sharing", - Annotations: map[string]string{ - "package": "sharing", - }, - - // This service is being previewed; hide from help output. - Hidden: true, - } - - // Add methods - cmd.AddCommand(newCreate()) - cmd.AddCommand(newDelete()) - cmd.AddCommand(newGet()) - cmd.AddCommand(newList()) - cmd.AddCommand(newUpdate()) - - // Apply optional overrides to this command. - for _, fn := range cmdOverrides { - fn(cmd) - } - - return cmd -} - -// start create command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var createOverrides []func( - *cobra.Command, - *sharing.CreateCleanRoom, -) - -func newCreate() *cobra.Command { - cmd := &cobra.Command{} - - var createReq sharing.CreateCleanRoom - var createJson flags.JsonFlag - - // TODO: short flags - cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - - cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `User-provided free-form text description.`) - - cmd.Use = "create" - cmd.Short = `Create a clean room.` - cmd.Long = `Create a clean room. - - Creates a new clean room with specified colaborators. 
The caller must be a - metastore admin or have the **CREATE_CLEAN_ROOM** privilege on the metastore.` - - cmd.Annotations = make(map[string]string) - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - if cmd.Flags().Changed("json") { - diags := createJson.Unmarshal(&createReq) - if diags.HasError() { - return diags.Error() - } - if len(diags) > 0 { - err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) - if err != nil { - return err - } - } - } else { - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") - } - - response, err := w.CleanRooms.Create(ctx, createReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range createOverrides { - fn(cmd, &createReq) - } - - return cmd -} - -// start delete command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var deleteOverrides []func( - *cobra.Command, - *sharing.DeleteCleanRoomRequest, -) - -func newDelete() *cobra.Command { - cmd := &cobra.Command{} - - var deleteReq sharing.DeleteCleanRoomRequest - - // TODO: short flags - - cmd.Use = "delete NAME" - cmd.Short = `Delete a clean room.` - cmd.Long = `Delete a clean room. - - Deletes a data object clean room from the metastore. The caller must be an - owner of the clean room. - - Arguments: - NAME: The name of the clean room.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - deleteReq.Name = args[0] - - err = w.CleanRooms.Delete(ctx, deleteReq) - if err != nil { - return err - } - return nil - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range deleteOverrides { - fn(cmd, &deleteReq) - } - - return cmd -} - -// start get command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var getOverrides []func( - *cobra.Command, - *sharing.GetCleanRoomRequest, -) - -func newGet() *cobra.Command { - cmd := &cobra.Command{} - - var getReq sharing.GetCleanRoomRequest - - // TODO: short flags - - cmd.Flags().BoolVar(&getReq.IncludeRemoteDetails, "include-remote-details", getReq.IncludeRemoteDetails, `Whether to include remote details (central) on the clean room.`) - - cmd.Use = "get NAME" - cmd.Short = `Get a clean room.` - cmd.Long = `Get a clean room. - - Gets a data object clean room from the metastore. The caller must be a - metastore admin or the owner of the clean room. 
- - Arguments: - NAME: The name of the clean room.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - getReq.Name = args[0] - - response, err := w.CleanRooms.Get(ctx, getReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range getOverrides { - fn(cmd, &getReq) - } - - return cmd -} - -// start list command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var listOverrides []func( - *cobra.Command, - *sharing.ListCleanRoomsRequest, -) - -func newList() *cobra.Command { - cmd := &cobra.Command{} - - var listReq sharing.ListCleanRoomsRequest - - // TODO: short flags - - cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of clean rooms to return.`) - cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) - - cmd.Use = "list" - cmd.Short = `List clean rooms.` - cmd.Long = `List clean rooms. - - Gets an array of data object clean rooms from the metastore. The caller must - be a metastore admin or the owner of the clean room. There is no guarantee of - a specific ordering of the elements in the array.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(0) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - response := w.CleanRooms.List(ctx, listReq) - return cmdio.RenderIterator(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range listOverrides { - fn(cmd, &listReq) - } - - return cmd -} - -// start update command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var updateOverrides []func( - *cobra.Command, - *sharing.UpdateCleanRoom, -) - -func newUpdate() *cobra.Command { - cmd := &cobra.Command{} - - var updateReq sharing.UpdateCleanRoom - var updateJson flags.JsonFlag - - // TODO: short flags - cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) - - // TODO: array: catalog_updates - cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) - cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of clean room.`) - - cmd.Use = "update NAME" - cmd.Short = `Update a clean room.` - cmd.Long = `Update a clean room. - - Updates the clean room with the changes and data objects in the request. 
The - caller must be the owner of the clean room or a metastore admin. - - When the caller is a metastore admin, only the __owner__ field can be updated. - - In the case that the clean room name is changed **updateCleanRoom** requires - that the caller is both the clean room owner and a metastore admin. - - For each table that is added through this method, the clean room owner must - also have **SELECT** privilege on the table. The privilege must be maintained - indefinitely for recipients to be able to access the table. Typically, you - should use a group as the clean room owner. - - Table removals through **update** do not require additional privileges. - - Arguments: - NAME: The name of the clean room.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - if cmd.Flags().Changed("json") { - diags := updateJson.Unmarshal(&updateReq) - if diags.HasError() { - return diags.Error() - } - if len(diags) > 0 { - err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) - if err != nil { - return err - } - } - } - updateReq.Name = args[0] - - response, err := w.CleanRooms.Update(ctx, updateReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range updateOverrides { - fn(cmd, &updateReq) - } - - return cmd -} - -// end service CleanRooms diff --git a/cmd/workspace/cluster-policies/cluster-policies.go b/cmd/workspace/cluster-policies/cluster-policies.go index b34dd53d..9e50065f 100755 --- a/cmd/workspace/cluster-policies/cluster-policies.go +++ b/cmd/workspace/cluster-policies/cluster-policies.go @@ -634,8 +634,9 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set cluster policy permissions.` cmd.Long = `Set cluster policy permissions. - Sets permissions on a cluster policy. Cluster policies can inherit permissions - from their root object. + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. Arguments: CLUSTER_POLICY_ID: The cluster policy for which to get or manage permissions.` diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go index 0ed454de..db788753 100755 --- a/cmd/workspace/clusters/clusters.go +++ b/cmd/workspace/clusters/clusters.go @@ -512,7 +512,7 @@ func newEdit() *cobra.Command { Clusters created by the Databricks Jobs service cannot be edited. Arguments: - CLUSTER_ID: ID of the cluser + CLUSTER_ID: ID of the cluster SPARK_VERSION: The Spark version of the cluster, e.g. 3.3.x-scala2.11. A list of available Spark versions can be retrieved by using the :method:clusters/sparkVersions API call.` @@ -1504,8 +1504,9 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set cluster permissions.` cmd.Long = `Set cluster permissions. - Sets permissions on a cluster. Clusters can inherit permissions from their - root object. + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. 
Objects can inherit + permissions from their root object. Arguments: CLUSTER_ID: The cluster for which to get or manage permissions.` diff --git a/cmd/workspace/cmd.go b/cmd/workspace/cmd.go index 3fe5b268..9cb3cca9 100755 --- a/cmd/workspace/cmd.go +++ b/cmd/workspace/cmd.go @@ -8,7 +8,6 @@ import ( apps "github.com/databricks/cli/cmd/workspace/apps" artifact_allowlists "github.com/databricks/cli/cmd/workspace/artifact-allowlists" catalogs "github.com/databricks/cli/cmd/workspace/catalogs" - clean_rooms "github.com/databricks/cli/cmd/workspace/clean-rooms" cluster_policies "github.com/databricks/cli/cmd/workspace/cluster-policies" clusters "github.com/databricks/cli/cmd/workspace/clusters" connections "github.com/databricks/cli/cmd/workspace/connections" @@ -17,6 +16,7 @@ import ( consumer_listings "github.com/databricks/cli/cmd/workspace/consumer-listings" consumer_personalization_requests "github.com/databricks/cli/cmd/workspace/consumer-personalization-requests" consumer_providers "github.com/databricks/cli/cmd/workspace/consumer-providers" + credentials "github.com/databricks/cli/cmd/workspace/credentials" credentials_manager "github.com/databricks/cli/cmd/workspace/credentials-manager" current_user "github.com/databricks/cli/cmd/workspace/current-user" dashboard_widgets "github.com/databricks/cli/cmd/workspace/dashboard-widgets" @@ -98,7 +98,6 @@ func All() []*cobra.Command { out = append(out, apps.New()) out = append(out, artifact_allowlists.New()) out = append(out, catalogs.New()) - out = append(out, clean_rooms.New()) out = append(out, cluster_policies.New()) out = append(out, clusters.New()) out = append(out, connections.New()) @@ -107,6 +106,7 @@ func All() []*cobra.Command { out = append(out, consumer_listings.New()) out = append(out, consumer_personalization_requests.New()) out = append(out, consumer_providers.New()) + out = append(out, credentials.New()) out = append(out, credentials_manager.New()) out = append(out, current_user.New()) out = append(out, dashboard_widgets.New()) diff --git a/cmd/workspace/credentials/credentials.go b/cmd/workspace/credentials/credentials.go new file mode 100755 index 00000000..869df062 --- /dev/null +++ b/cmd/workspace/credentials/credentials.go @@ -0,0 +1,545 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package credentials + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "credentials", + Short: `A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant.`, + Long: `A credential represents an authentication and authorization mechanism for + accessing services on your cloud tenant. Each credential is subject to Unity + Catalog access-control policies that control which users and groups can access + the credential. + + To create credentials, you must be a Databricks account admin or have the + CREATE SERVICE CREDENTIAL privilege. 
The user who creates the credential can + delegate ownership to another user or group to manage permissions on it`, + GroupID: "catalog", + Annotations: map[string]string{ + "package": "catalog", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreateCredential()) + cmd.AddCommand(newDeleteCredential()) + cmd.AddCommand(newGenerateTemporaryServiceCredential()) + cmd.AddCommand(newGetCredential()) + cmd.AddCommand(newListCredentials()) + cmd.AddCommand(newUpdateCredential()) + cmd.AddCommand(newValidateCredential()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create-credential command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createCredentialOverrides []func( + *cobra.Command, + *catalog.CreateCredentialRequest, +) + +func newCreateCredential() *cobra.Command { + cmd := &cobra.Command{} + + var createCredentialReq catalog.CreateCredentialRequest + var createCredentialJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createCredentialJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: aws_iam_role + // TODO: complex arg: azure_managed_identity + cmd.Flags().StringVar(&createCredentialReq.Comment, "comment", createCredentialReq.Comment, `Comment associated with the credential.`) + cmd.Flags().StringVar(&createCredentialReq.Name, "name", createCredentialReq.Name, `The credential name.`) + cmd.Flags().Var(&createCredentialReq.Purpose, "purpose", `Indicates the purpose of the credential. Supported values: [SERVICE]`) + cmd.Flags().BoolVar(&createCredentialReq.SkipValidation, "skip-validation", createCredentialReq.SkipValidation, `Optional.`) + + cmd.Use = "create-credential" + cmd.Short = `Create a credential.` + cmd.Long = `Create a credential. + + Creates a new credential.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createCredentialJson.Unmarshal(&createCredentialReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + + response, err := w.Credentials.CreateCredential(ctx, createCredentialReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createCredentialOverrides { + fn(cmd, &createCredentialReq) + } + + return cmd +} + +// start delete-credential command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var deleteCredentialOverrides []func( + *cobra.Command, + *catalog.DeleteCredentialRequest, +) + +func newDeleteCredential() *cobra.Command { + cmd := &cobra.Command{} + + var deleteCredentialReq catalog.DeleteCredentialRequest + + // TODO: short flags + + cmd.Flags().BoolVar(&deleteCredentialReq.Force, "force", deleteCredentialReq.Force, `Force deletion even if there are dependent services.`) + + cmd.Use = "delete-credential NAME_ARG" + cmd.Short = `Delete a credential.` + cmd.Long = `Delete a credential. + + Deletes a credential from the metastore. The caller must be an owner of the + credential. + + Arguments: + NAME_ARG: Name of the credential.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteCredentialReq.NameArg = args[0] + + err = w.Credentials.DeleteCredential(ctx, deleteCredentialReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteCredentialOverrides { + fn(cmd, &deleteCredentialReq) + } + + return cmd +} + +// start generate-temporary-service-credential command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var generateTemporaryServiceCredentialOverrides []func( + *cobra.Command, + *catalog.GenerateTemporaryServiceCredentialRequest, +) + +func newGenerateTemporaryServiceCredential() *cobra.Command { + cmd := &cobra.Command{} + + var generateTemporaryServiceCredentialReq catalog.GenerateTemporaryServiceCredentialRequest + var generateTemporaryServiceCredentialJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&generateTemporaryServiceCredentialJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: azure_options + cmd.Flags().StringVar(&generateTemporaryServiceCredentialReq.CredentialName, "credential-name", generateTemporaryServiceCredentialReq.CredentialName, `The name of the service credential used to generate a temporary credential.`) + + cmd.Use = "generate-temporary-service-credential" + cmd.Short = `Generate a temporary service credential.` + cmd.Long = `Generate a temporary service credential. + + Returns a set of temporary credentials generated using the specified service + credential. 
The caller must be a metastore admin or have the metastore + privilege **ACCESS** on the service credential.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := generateTemporaryServiceCredentialJson.Unmarshal(&generateTemporaryServiceCredentialReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + + response, err := w.Credentials.GenerateTemporaryServiceCredential(ctx, generateTemporaryServiceCredentialReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range generateTemporaryServiceCredentialOverrides { + fn(cmd, &generateTemporaryServiceCredentialReq) + } + + return cmd +} + +// start get-credential command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getCredentialOverrides []func( + *cobra.Command, + *catalog.GetCredentialRequest, +) + +func newGetCredential() *cobra.Command { + cmd := &cobra.Command{} + + var getCredentialReq catalog.GetCredentialRequest + + // TODO: short flags + + cmd.Use = "get-credential NAME_ARG" + cmd.Short = `Get a credential.` + cmd.Long = `Get a credential. + + Gets a credential from the metastore. The caller must be a metastore admin, + the owner of the credential, or have any permission on the credential. + + Arguments: + NAME_ARG: Name of the credential.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getCredentialReq.NameArg = args[0] + + response, err := w.Credentials.GetCredential(ctx, getCredentialReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getCredentialOverrides { + fn(cmd, &getCredentialReq) + } + + return cmd +} + +// start list-credentials command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var listCredentialsOverrides []func( + *cobra.Command, + *catalog.ListCredentialsRequest, +) + +func newListCredentials() *cobra.Command { + cmd := &cobra.Command{} + + var listCredentialsReq catalog.ListCredentialsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listCredentialsReq.MaxResults, "max-results", listCredentialsReq.MaxResults, `Maximum number of credentials to return.`) + cmd.Flags().StringVar(&listCredentialsReq.PageToken, "page-token", listCredentialsReq.PageToken, `Opaque token to retrieve the next page of results.`) + cmd.Flags().Var(&listCredentialsReq.Purpose, "purpose", `Return only credentials for the specified purpose. Supported values: [SERVICE]`) + + cmd.Use = "list-credentials" + cmd.Short = `List credentials.` + cmd.Long = `List credentials. + + Gets an array of credentials (as __CredentialInfo__ objects). + + The array is limited to only the credentials that the caller has permission to + access. If the caller is a metastore admin, retrieval of credentials is + unrestricted. There is no guarantee of a specific ordering of the elements in + the array.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.Credentials.ListCredentials(ctx, listCredentialsReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listCredentialsOverrides { + fn(cmd, &listCredentialsReq) + } + + return cmd +} + +// start update-credential command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateCredentialOverrides []func( + *cobra.Command, + *catalog.UpdateCredentialRequest, +) + +func newUpdateCredential() *cobra.Command { + cmd := &cobra.Command{} + + var updateCredentialReq catalog.UpdateCredentialRequest + var updateCredentialJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateCredentialJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: aws_iam_role + // TODO: complex arg: azure_managed_identity + cmd.Flags().StringVar(&updateCredentialReq.Comment, "comment", updateCredentialReq.Comment, `Comment associated with the credential.`) + cmd.Flags().BoolVar(&updateCredentialReq.Force, "force", updateCredentialReq.Force, `Force update even if there are dependent services.`) + cmd.Flags().Var(&updateCredentialReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. 
Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`) + cmd.Flags().StringVar(&updateCredentialReq.NewName, "new-name", updateCredentialReq.NewName, `New name of credential.`) + cmd.Flags().StringVar(&updateCredentialReq.Owner, "owner", updateCredentialReq.Owner, `Username of current owner of credential.`) + cmd.Flags().BoolVar(&updateCredentialReq.SkipValidation, "skip-validation", updateCredentialReq.SkipValidation, `Supply true to this argument to skip validation of the updated credential.`) + + cmd.Use = "update-credential NAME_ARG" + cmd.Short = `Update a credential.` + cmd.Long = `Update a credential. + + Updates a credential on the metastore. + + The caller must be the owner of the credential or a metastore admin or have + the MANAGE permission. If the caller is a metastore admin, only the + __owner__ field can be changed. + + Arguments: + NAME_ARG: Name of the credential.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateCredentialJson.Unmarshal(&updateCredentialReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + updateCredentialReq.NameArg = args[0] + + response, err := w.Credentials.UpdateCredential(ctx, updateCredentialReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateCredentialOverrides { + fn(cmd, &updateCredentialReq) + } + + return cmd +} + +// start validate-credential command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var validateCredentialOverrides []func( + *cobra.Command, + *catalog.ValidateCredentialRequest, +) + +func newValidateCredential() *cobra.Command { + cmd := &cobra.Command{} + + var validateCredentialReq catalog.ValidateCredentialRequest + var validateCredentialJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&validateCredentialJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: aws_iam_role + // TODO: complex arg: azure_managed_identity + cmd.Flags().StringVar(&validateCredentialReq.CredentialName, "credential-name", validateCredentialReq.CredentialName, `Required.`) + cmd.Flags().Var(&validateCredentialReq.Purpose, "purpose", `The purpose of the credential. Supported values: [SERVICE]`) + + cmd.Use = "validate-credential" + cmd.Short = `Validate a credential.` + cmd.Long = `Validate a credential. + + Validates a credential. + + Either the __credential_name__ or the cloud-specific credential must be + provided. 
+ + The caller must be a metastore admin or the credential owner.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := validateCredentialJson.Unmarshal(&validateCredentialReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + + response, err := w.Credentials.ValidateCredential(ctx, validateCredentialReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range validateCredentialOverrides { + fn(cmd, &validateCredentialReq) + } + + return cmd +} + +// end service Credentials diff --git a/cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs.go b/cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs.go new file mode 100755 index 00000000..d0975537 --- /dev/null +++ b/cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs.go @@ -0,0 +1,220 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package disable_legacy_dbfs + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "disable-legacy-dbfs", + Short: `When this setting is on, access to DBFS root and DBFS mounts is disallowed (as well as creation of new mounts).`, + Long: `When this setting is on, access to DBFS root and DBFS mounts is disallowed (as + well as creation of new mounts). When the setting is off, all DBFS + functionality is enabled`, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *settings.DeleteDisableLegacyDbfsRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq settings.DeleteDisableLegacyDbfsRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteReq.Etag, "etag", deleteReq.Etag, `etag used for versioning.`) + + cmd.Use = "delete" + cmd.Short = `Delete the disable legacy DBFS setting.` + cmd.Long = `Delete the disable legacy DBFS setting. 
+ + Deletes the disable legacy DBFS setting for a workspace, reverting back to the + default.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.DisableLegacyDbfs().Delete(ctx, deleteReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetDisableLegacyDbfsRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetDisableLegacyDbfsRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get the disable legacy DBFS setting.` + cmd.Long = `Get the disable legacy DBFS setting. + + Gets the disable legacy DBFS setting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.DisableLegacyDbfs().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *settings.UpdateDisableLegacyDbfsRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateDisableLegacyDbfsRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the disable legacy DBFS setting.` + cmd.Long = `Update the disable legacy DBFS setting. 
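
The delete and get handlers above map one-to-one onto the new Settings accessor. A minimal sketch of driving it directly through the Go SDK, assuming the v0.51.0 surface this diff pins in go.mod:

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/settings"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		panic(err)
	}

	// Read the current disable-legacy-dbfs setting for the workspace.
	setting, err := w.Settings.DisableLegacyDbfs().Get(ctx, settings.GetDisableLegacyDbfsRequest{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("disable-legacy-dbfs: %+v\n", setting)

	// Revert to the default by deleting the setting; pass the etag from a
	// prior read if the server requires optimistic concurrency control.
	if _, err := w.Settings.DisableLegacyDbfs().Delete(ctx, settings.DeleteDisableLegacyDbfsRequest{}); err != nil {
		panic(err)
	}
}
```
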
+ + Updates the disable legacy DBFS setting for the workspace.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Settings.DisableLegacyDbfs().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service DisableLegacyDbfs diff --git a/cmd/workspace/experiments/experiments.go b/cmd/workspace/experiments/experiments.go index 4c6b57d1..b5173aeb 100755 --- a/cmd/workspace/experiments/experiments.go +++ b/cmd/workspace/experiments/experiments.go @@ -2034,8 +2034,9 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set experiment permissions.` cmd.Long = `Set experiment permissions. - Sets permissions on an experiment. Experiments can inherit permissions from - their root object. + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. Arguments: EXPERIMENT_ID: The experiment for which to get or manage permissions.` diff --git a/cmd/workspace/external-locations/external-locations.go b/cmd/workspace/external-locations/external-locations.go index 97d34df0..82fd8d7e 100755 --- a/cmd/workspace/external-locations/external-locations.go +++ b/cmd/workspace/external-locations/external-locations.go @@ -356,7 +356,7 @@ func newUpdate() *cobra.Command { // TODO: complex arg: encryption_details cmd.Flags().BoolVar(&updateReq.Fallback, "fallback", updateReq.Fallback, `Indicates whether fallback mode is enabled for this external location.`) cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if changing url invalidates dependent external tables or mounts.`) - cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`) + cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `. 
Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the external location.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The owner of the external location.`) cmd.Flags().BoolVar(&updateReq.ReadOnly, "read-only", updateReq.ReadOnly, `Indicates whether the external location is read-only.`) diff --git a/cmd/workspace/genie/genie.go b/cmd/workspace/genie/genie.go index 287bcde6..25fa9396 100755 --- a/cmd/workspace/genie/genie.go +++ b/cmd/workspace/genie/genie.go @@ -160,13 +160,13 @@ func newCreateMessage() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var executeMessageQueryOverrides []func( *cobra.Command, - *dashboards.ExecuteMessageQueryRequest, + *dashboards.GenieExecuteMessageQueryRequest, ) func newExecuteMessageQuery() *cobra.Command { cmd := &cobra.Command{} - var executeMessageQueryReq dashboards.ExecuteMessageQueryRequest + var executeMessageQueryReq dashboards.GenieExecuteMessageQueryRequest // TODO: short flags diff --git a/cmd/workspace/instance-pools/instance-pools.go b/cmd/workspace/instance-pools/instance-pools.go index 8a84df94..40c76a5d 100755 --- a/cmd/workspace/instance-pools/instance-pools.go +++ b/cmd/workspace/instance-pools/instance-pools.go @@ -635,8 +635,9 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set instance pool permissions.` cmd.Long = `Set instance pool permissions. - Sets permissions on an instance pool. Instance pools can inherit permissions - from their root object. + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. Arguments: INSTANCE_POOL_ID: The instance pool for which to get or manage permissions.` diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go index d4ceb0c2..b067937e 100755 --- a/cmd/workspace/jobs/jobs.go +++ b/cmd/workspace/jobs/jobs.go @@ -847,7 +847,7 @@ func newGetRun() *cobra.Command { cmd.Flags().BoolVar(&getRunReq.IncludeHistory, "include-history", getRunReq.IncludeHistory, `Whether to include the repair history in the response.`) cmd.Flags().BoolVar(&getRunReq.IncludeResolvedValues, "include-resolved-values", getRunReq.IncludeResolvedValues, `Whether to include resolved parameter values in the response.`) - cmd.Flags().StringVar(&getRunReq.PageToken, "page-token", getRunReq.PageToken, `To list the next page or the previous page of job tasks, set this field to the value of the next_page_token or prev_page_token returned in the GetJob response.`) + cmd.Flags().StringVar(&getRunReq.PageToken, "page-token", getRunReq.PageToken, `To list the next page of job tasks, set this field to the value of the next_page_token returned in the GetJob response.`) cmd.Use = "get-run RUN_ID" cmd.Short = `Get a single job run.` @@ -1339,6 +1339,7 @@ func newRunNow() *cobra.Command { // TODO: array: jar_params // TODO: map via StringToStringVar: job_parameters // TODO: map via StringToStringVar: notebook_params + // TODO: array: only // TODO: complex arg: pipeline_params // TODO: map via StringToStringVar: python_named_params // TODO: array: python_params @@ -1470,8 +1471,9 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set job permissions.` cmd.Long = `Set job permissions. - Sets permissions on a job. Jobs can inherit permissions from their root - object. 
+ Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. Arguments: JOB_ID: The job for which to get or manage permissions.` @@ -1557,6 +1559,7 @@ func newSubmit() *cobra.Command { cmd.Flags().Var(&submitJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: array: access_control_list + cmd.Flags().StringVar(&submitReq.BudgetPolicyId, "budget-policy-id", submitReq.BudgetPolicyId, `The user specified id of the budget policy to use for this one-time run.`) // TODO: complex arg: email_notifications // TODO: array: environments // TODO: complex arg: git_source diff --git a/cmd/workspace/lakeview/lakeview.go b/cmd/workspace/lakeview/lakeview.go index 33a45c65..239c72b6 100755 --- a/cmd/workspace/lakeview/lakeview.go +++ b/cmd/workspace/lakeview/lakeview.go @@ -70,35 +70,26 @@ func newCreate() *cobra.Command { cmd := &cobra.Command{} var createReq dashboards.CreateDashboardRequest + createReq.Dashboard = &dashboards.Dashboard{} var createJson flags.JsonFlag // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&createReq.ParentPath, "parent-path", createReq.ParentPath, `The workspace path of the folder containing the dashboard.`) - cmd.Flags().StringVar(&createReq.SerializedDashboard, "serialized-dashboard", createReq.SerializedDashboard, `The contents of the dashboard in serialized string form.`) - cmd.Flags().StringVar(&createReq.WarehouseId, "warehouse-id", createReq.WarehouseId, `The warehouse ID used to run the dashboard.`) + cmd.Flags().StringVar(&createReq.Dashboard.DisplayName, "display-name", createReq.Dashboard.DisplayName, `The display name of the dashboard.`) + cmd.Flags().StringVar(&createReq.Dashboard.SerializedDashboard, "serialized-dashboard", createReq.Dashboard.SerializedDashboard, `The contents of the dashboard in serialized string form.`) + cmd.Flags().StringVar(&createReq.Dashboard.WarehouseId, "warehouse-id", createReq.Dashboard.WarehouseId, `The warehouse ID used to run the dashboard.`) - cmd.Use = "create DISPLAY_NAME" + cmd.Use = "create" cmd.Short = `Create dashboard.` cmd.Long = `Create dashboard. - Create a draft dashboard. - - Arguments: - DISPLAY_NAME: The display name of the dashboard.` + Create a draft dashboard.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - if cmd.Flags().Changed("json") { - err := root.ExactArgs(0)(cmd, args) - if err != nil { - return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'display_name' in your JSON input") - } - return nil - } - check := root.ExactArgs(1) + check := root.ExactArgs(0) return check(cmd, args) } @@ -108,7 +99,7 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - diags := createJson.Unmarshal(&createReq) + diags := createJson.Unmarshal(&createReq.Dashboard) if diags.HasError() { return diags.Error() } @@ -119,9 +110,6 @@ func newCreate() *cobra.Command { } } } - if !cmd.Flags().Changed("json") { - createReq.DisplayName = args[0] - } response, err := w.Lakeview.Create(ctx, createReq) if err != nil { @@ -155,13 +143,15 @@ func newCreateSchedule() *cobra.Command { cmd := &cobra.Command{} var createScheduleReq dashboards.CreateScheduleRequest + createScheduleReq.Schedule = &dashboards.Schedule{} var createScheduleJson flags.JsonFlag // TODO: short flags cmd.Flags().Var(&createScheduleJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&createScheduleReq.DisplayName, "display-name", createScheduleReq.DisplayName, `The display name for schedule.`) - cmd.Flags().Var(&createScheduleReq.PauseStatus, "pause-status", `The status indicates whether this schedule is paused or not. Supported values: [PAUSED, UNPAUSED]`) + cmd.Flags().StringVar(&createScheduleReq.Schedule.DisplayName, "display-name", createScheduleReq.Schedule.DisplayName, `The display name for schedule.`) + cmd.Flags().Var(&createScheduleReq.Schedule.PauseStatus, "pause-status", `The status indicates whether this schedule is paused or not. Supported values: [PAUSED, UNPAUSED]`) + cmd.Flags().StringVar(&createScheduleReq.Schedule.WarehouseId, "warehouse-id", createScheduleReq.Schedule.WarehouseId, `The warehouse id to run the dashboard with for the schedule.`) cmd.Use = "create-schedule DASHBOARD_ID" cmd.Short = `Create dashboard schedule.` @@ -176,6 +166,13 @@ func newCreateSchedule() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cron_schedule' in your JSON input") + } + return nil + } check := root.ExactArgs(1) return check(cmd, args) } @@ -186,7 +183,7 @@ func newCreateSchedule() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - diags := createScheduleJson.Unmarshal(&createScheduleReq) + diags := createScheduleJson.Unmarshal(&createScheduleReq.Schedule) if diags.HasError() { return diags.Error() } @@ -196,8 +193,6 @@ func newCreateSchedule() *cobra.Command { return err } } - } else { - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } createScheduleReq.DashboardId = args[0] @@ -233,6 +228,7 @@ func newCreateSubscription() *cobra.Command { cmd := &cobra.Command{} var createSubscriptionReq dashboards.CreateSubscriptionRequest + createSubscriptionReq.Subscription = &dashboards.Subscription{} var createSubscriptionJson flags.JsonFlag // TODO: short flags @@ -252,6 +248,13 @@ func newCreateSubscription() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'subscriber' in your JSON input") + } + return nil + } check := root.ExactArgs(2) return check(cmd, args) } @@ -262,7 +265,7 @@ func newCreateSubscription() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - diags := createSubscriptionJson.Unmarshal(&createSubscriptionReq) + diags := createSubscriptionJson.Unmarshal(&createSubscriptionReq.Subscription) if diags.HasError() { return diags.Error() } @@ -272,8 +275,6 @@ func newCreateSubscription() *cobra.Command { return err } } - } else { - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } createSubscriptionReq.DashboardId = args[0] createSubscriptionReq.ScheduleId = args[1] @@ -313,8 +314,6 @@ func newDeleteSchedule() *cobra.Command { // TODO: short flags - cmd.Flags().StringVar(&deleteScheduleReq.Etag, "etag", deleteScheduleReq.Etag, `The etag for the schedule.`) - cmd.Use = "delete-schedule DASHBOARD_ID SCHEDULE_ID" cmd.Short = `Delete dashboard schedule.` cmd.Long = `Delete dashboard schedule. @@ -376,8 +375,6 @@ func newDeleteSubscription() *cobra.Command { // TODO: short flags - cmd.Flags().StringVar(&deleteSubscriptionReq.Etag, "etag", deleteSubscriptionReq.Etag, `The etag for the subscription.`) - cmd.Use = "delete-subscription DASHBOARD_ID SCHEDULE_ID SUBSCRIPTION_ID" cmd.Short = `Delete schedule subscription.` cmd.Long = `Delete schedule subscription. @@ -682,7 +679,6 @@ func newList() *cobra.Command { // TODO: short flags cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `The number of dashboards to return per page.`) - cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `A page token, received from a previous ListDashboards call.`) cmd.Flags().BoolVar(&listReq.ShowTrashed, "show-trashed", listReq.ShowTrashed, `The flag to include dashboards located in the trash.`) cmd.Flags().Var(&listReq.View, "view", `DASHBOARD_VIEW_BASIConly includes summary metadata from the dashboard. 
Supported values: [DASHBOARD_VIEW_BASIC]`) @@ -735,7 +731,6 @@ func newListSchedules() *cobra.Command { // TODO: short flags cmd.Flags().IntVar(&listSchedulesReq.PageSize, "page-size", listSchedulesReq.PageSize, `The number of schedules to return per page.`) - cmd.Flags().StringVar(&listSchedulesReq.PageToken, "page-token", listSchedulesReq.PageToken, `A page token, received from a previous ListSchedules call.`) cmd.Use = "list-schedules DASHBOARD_ID" cmd.Short = `List dashboard schedules.` @@ -794,7 +789,6 @@ func newListSubscriptions() *cobra.Command { // TODO: short flags cmd.Flags().IntVar(&listSubscriptionsReq.PageSize, "page-size", listSubscriptionsReq.PageSize, `The number of subscriptions to return per page.`) - cmd.Flags().StringVar(&listSubscriptionsReq.PageToken, "page-token", listSubscriptionsReq.PageToken, `A page token, received from a previous ListSubscriptions call.`) cmd.Use = "list-subscriptions DASHBOARD_ID SCHEDULE_ID" cmd.Short = `List schedule subscriptions.` @@ -1126,15 +1120,15 @@ func newUpdate() *cobra.Command { cmd := &cobra.Command{} var updateReq dashboards.UpdateDashboardRequest + updateReq.Dashboard = &dashboards.Dashboard{} var updateJson flags.JsonFlag // TODO: short flags cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&updateReq.DisplayName, "display-name", updateReq.DisplayName, `The display name of the dashboard.`) - cmd.Flags().StringVar(&updateReq.Etag, "etag", updateReq.Etag, `The etag for the dashboard.`) - cmd.Flags().StringVar(&updateReq.SerializedDashboard, "serialized-dashboard", updateReq.SerializedDashboard, `The contents of the dashboard in serialized string form.`) - cmd.Flags().StringVar(&updateReq.WarehouseId, "warehouse-id", updateReq.WarehouseId, `The warehouse ID used to run the dashboard.`) + cmd.Flags().StringVar(&updateReq.Dashboard.DisplayName, "display-name", updateReq.Dashboard.DisplayName, `The display name of the dashboard.`) + cmd.Flags().StringVar(&updateReq.Dashboard.SerializedDashboard, "serialized-dashboard", updateReq.Dashboard.SerializedDashboard, `The contents of the dashboard in serialized string form.`) + cmd.Flags().StringVar(&updateReq.Dashboard.WarehouseId, "warehouse-id", updateReq.Dashboard.WarehouseId, `The warehouse ID used to run the dashboard.`) cmd.Use = "update DASHBOARD_ID" cmd.Short = `Update dashboard.` @@ -1158,7 +1152,7 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - diags := updateJson.Unmarshal(&updateReq) + diags := updateJson.Unmarshal(&updateReq.Dashboard) if diags.HasError() { return diags.Error() } @@ -1203,14 +1197,15 @@ func newUpdateSchedule() *cobra.Command { cmd := &cobra.Command{} var updateScheduleReq dashboards.UpdateScheduleRequest + updateScheduleReq.Schedule = &dashboards.Schedule{} var updateScheduleJson flags.JsonFlag // TODO: short flags cmd.Flags().Var(&updateScheduleJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&updateScheduleReq.DisplayName, "display-name", updateScheduleReq.DisplayName, `The display name for schedule.`) - cmd.Flags().StringVar(&updateScheduleReq.Etag, "etag", updateScheduleReq.Etag, `The etag for the schedule.`) - cmd.Flags().Var(&updateScheduleReq.PauseStatus, "pause-status", `The status indicates whether this schedule is paused or not. 
Supported values: [PAUSED, UNPAUSED]`) + cmd.Flags().StringVar(&updateScheduleReq.Schedule.DisplayName, "display-name", updateScheduleReq.Schedule.DisplayName, `The display name for schedule.`) + cmd.Flags().Var(&updateScheduleReq.Schedule.PauseStatus, "pause-status", `The status indicates whether this schedule is paused or not. Supported values: [PAUSED, UNPAUSED]`) + cmd.Flags().StringVar(&updateScheduleReq.Schedule.WarehouseId, "warehouse-id", updateScheduleReq.Schedule.WarehouseId, `The warehouse id to run the dashboard with for the schedule.`) cmd.Use = "update-schedule DASHBOARD_ID SCHEDULE_ID" cmd.Short = `Update dashboard schedule.` @@ -1226,6 +1221,13 @@ func newUpdateSchedule() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cron_schedule' in your JSON input") + } + return nil + } check := root.ExactArgs(2) return check(cmd, args) } @@ -1236,7 +1238,7 @@ func newUpdateSchedule() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - diags := updateScheduleJson.Unmarshal(&updateScheduleReq) + diags := updateScheduleJson.Unmarshal(&updateScheduleReq.Schedule) if diags.HasError() { return diags.Error() } @@ -1246,8 +1248,6 @@ func newUpdateSchedule() *cobra.Command { return err } } - } else { - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } updateScheduleReq.DashboardId = args[0] updateScheduleReq.ScheduleId = args[1] diff --git a/cmd/workspace/model-registry/model-registry.go b/cmd/workspace/model-registry/model-registry.go index b45d83e3..19446469 100755 --- a/cmd/workspace/model-registry/model-registry.go +++ b/cmd/workspace/model-registry/model-registry.go @@ -2123,7 +2123,8 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set registered model permissions.` cmd.Long = `Set registered model permissions. - Sets permissions on a registered model. Registered models can inherit + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit permissions from their root object. 
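
Stepping back to the Lakeview hunks above: create and update now wrap the payload in a nested Dashboard object, which is also what --json unmarshals into. The call shape below mirrors the updated integration test later in this diff; the display name, parent path, serialized payload, and warehouse ID are placeholders.

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		panic(err)
	}

	// The request body lives in the nested Dashboard field rather than on
	// the request struct itself.
	created, err := w.Lakeview.Create(ctx, dashboards.CreateDashboardRequest{
		Dashboard: &dashboards.Dashboard{
			DisplayName:         "my-dashboard",
			ParentPath:          "/Workspace/Users/someone@example.com",
			SerializedDashboard: `{"pages": []}`,
			WarehouseId:         "1234567890abcdef",
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("created dashboard:", created.DashboardId)
}
```
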
Arguments: diff --git a/cmd/workspace/online-tables/online-tables.go b/cmd/workspace/online-tables/online-tables.go index 1c25d1e2..f050017e 100755 --- a/cmd/workspace/online-tables/online-tables.go +++ b/cmd/workspace/online-tables/online-tables.go @@ -3,6 +3,9 @@ package online_tables import ( + "fmt" + "time" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" @@ -52,13 +55,20 @@ func newCreate() *cobra.Command { cmd := &cobra.Command{} var createReq catalog.CreateOnlineTableRequest + createReq.Table = &catalog.OnlineTable{} var createJson flags.JsonFlag + var createSkipWait bool + var createTimeout time.Duration + + cmd.Flags().BoolVar(&createSkipWait, "no-wait", createSkipWait, `do not wait to reach ACTIVE state`) + cmd.Flags().DurationVar(&createTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach ACTIVE state`) // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `Full three-part (catalog, schema, table) name of the table.`) + cmd.Flags().StringVar(&createReq.Table.Name, "name", createReq.Table.Name, `Full three-part (catalog, schema, table) name of the table.`) // TODO: complex arg: spec + // TODO: complex arg: status cmd.Use = "create" cmd.Short = `Create an Online Table.` @@ -79,7 +89,7 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - diags := createJson.Unmarshal(&createReq) + diags := createJson.Unmarshal(&createReq.Table) if diags.HasError() { return diags.Error() } @@ -91,11 +101,24 @@ func newCreate() *cobra.Command { } } - response, err := w.OnlineTables.Create(ctx, createReq) + wait, err := w.OnlineTables.Create(ctx, createReq) if err != nil { return err } - return cmdio.Render(ctx, response) + if createSkipWait { + return cmdio.Render(ctx, wait.Response) + } + spinner := cmdio.Spinner(ctx) + info, err := wait.OnProgress(func(i *catalog.OnlineTable) { + status := i.UnityCatalogProvisioningState + statusMessage := fmt.Sprintf("current status: %s", status) + spinner <- statusMessage + }).GetWithTimeout(createTimeout) + close(spinner) + if err != nil { + return err + } + return cmdio.Render(ctx, info) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/permissions/permissions.go b/cmd/workspace/permissions/permissions.go index d007a425..ca570351 100755 --- a/cmd/workspace/permissions/permissions.go +++ b/cmd/workspace/permissions/permissions.go @@ -241,8 +241,9 @@ func newSet() *cobra.Command { cmd.Short = `Set object permissions.` cmd.Long = `Set object permissions. - Sets permissions on an object. Objects can inherit permissions from their - parent objects or root object. + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their parent objects or root object. Arguments: REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following: alerts, diff --git a/cmd/workspace/pipelines/pipelines.go b/cmd/workspace/pipelines/pipelines.go index 5bd94e0b..38636e83 100755 --- a/cmd/workspace/pipelines/pipelines.go +++ b/cmd/workspace/pipelines/pipelines.go @@ -691,8 +691,9 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set pipeline permissions.` cmd.Long = `Set pipeline permissions. - Sets permissions on a pipeline. 
Pipelines can inherit permissions from their - root object. + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. Arguments: PIPELINE_ID: The pipeline for which to get or manage permissions.` @@ -972,6 +973,7 @@ func newUpdate() *cobra.Command { // TODO: array: notifications cmd.Flags().BoolVar(&updateReq.Photon, "photon", updateReq.Photon, `Whether Photon is enabled for this pipeline.`) cmd.Flags().StringVar(&updateReq.PipelineId, "pipeline-id", updateReq.PipelineId, `Unique identifier for this pipeline.`) + // TODO: complex arg: restart_window cmd.Flags().StringVar(&updateReq.Schema, "schema", updateReq.Schema, `The default schema (database) where tables are read from or published to.`) cmd.Flags().BoolVar(&updateReq.Serverless, "serverless", updateReq.Serverless, `Whether serverless compute is enabled for this pipeline.`) cmd.Flags().StringVar(&updateReq.Storage, "storage", updateReq.Storage, `DBFS root directory for storing checkpoints and tables.`) diff --git a/cmd/workspace/repos/repos.go b/cmd/workspace/repos/repos.go index b77347b0..7dcb1353 100755 --- a/cmd/workspace/repos/repos.go +++ b/cmd/workspace/repos/repos.go @@ -513,8 +513,9 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set repo permissions.` cmd.Long = `Set repo permissions. - Sets permissions on a repo. Repos can inherit permissions from their root - object. + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. Arguments: REPO_ID: The repo for which to get or manage permissions.` diff --git a/cmd/workspace/serving-endpoints/serving-endpoints.go b/cmd/workspace/serving-endpoints/serving-endpoints.go index 363e9ea1..cc99177c 100755 --- a/cmd/workspace/serving-endpoints/serving-endpoints.go +++ b/cmd/workspace/serving-endpoints/serving-endpoints.go @@ -1008,7 +1008,8 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set serving endpoint permissions.` cmd.Long = `Set serving endpoint permissions. - Sets permissions on a serving endpoint. Serving endpoints can inherit + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit permissions from their root object. 
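
The online-tables hunk above also switches create over to the SDK's long-running-operation waiter, so the CLI can report provisioning progress instead of returning immediately. A sketch of the same wait loop against the SDK, using a hypothetical three-part table name and the waiter API shown in that hunk:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/catalog"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		panic(err)
	}

	// Create now returns a waiter instead of the final object.
	wait, err := w.OnlineTables.Create(ctx, catalog.CreateOnlineTableRequest{
		Table: &catalog.OnlineTable{
			Name: "main.default.my_table_online", // hypothetical name
		},
	})
	if err != nil {
		panic(err)
	}

	// Block until the table reaches ACTIVE, reporting intermediate states,
	// mirroring the spinner loop in the generated command.
	table, err := wait.OnProgress(func(t *catalog.OnlineTable) {
		fmt.Println("current status:", t.UnityCatalogProvisioningState)
	}).GetWithTimeout(20 * time.Minute)
	if err != nil {
		panic(err)
	}
	fmt.Println("online table ready:", table.Name)
}
```
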
Arguments: diff --git a/cmd/workspace/settings/settings.go b/cmd/workspace/settings/settings.go index aaeecf41..cca77b2d 100755 --- a/cmd/workspace/settings/settings.go +++ b/cmd/workspace/settings/settings.go @@ -5,10 +5,13 @@ package settings import ( "github.com/spf13/cobra" + aibi_dashboard_embedding_access_policy "github.com/databricks/cli/cmd/workspace/aibi-dashboard-embedding-access-policy" + aibi_dashboard_embedding_approved_domains "github.com/databricks/cli/cmd/workspace/aibi-dashboard-embedding-approved-domains" automatic_cluster_update "github.com/databricks/cli/cmd/workspace/automatic-cluster-update" compliance_security_profile "github.com/databricks/cli/cmd/workspace/compliance-security-profile" default_namespace "github.com/databricks/cli/cmd/workspace/default-namespace" disable_legacy_access "github.com/databricks/cli/cmd/workspace/disable-legacy-access" + disable_legacy_dbfs "github.com/databricks/cli/cmd/workspace/disable-legacy-dbfs" enhanced_security_monitoring "github.com/databricks/cli/cmd/workspace/enhanced-security-monitoring" restrict_workspace_admins "github.com/databricks/cli/cmd/workspace/restrict-workspace-admins" ) @@ -29,10 +32,13 @@ func New() *cobra.Command { } // Add subservices + cmd.AddCommand(aibi_dashboard_embedding_access_policy.New()) + cmd.AddCommand(aibi_dashboard_embedding_approved_domains.New()) cmd.AddCommand(automatic_cluster_update.New()) cmd.AddCommand(compliance_security_profile.New()) cmd.AddCommand(default_namespace.New()) cmd.AddCommand(disable_legacy_access.New()) + cmd.AddCommand(disable_legacy_dbfs.New()) cmd.AddCommand(enhanced_security_monitoring.New()) cmd.AddCommand(restrict_workspace_admins.New()) diff --git a/cmd/workspace/shares/shares.go b/cmd/workspace/shares/shares.go index 62c3407f..f70963f2 100755 --- a/cmd/workspace/shares/shares.go +++ b/cmd/workspace/shares/shares.go @@ -391,7 +391,6 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the share.`) - cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of share.`) cmd.Flags().StringVar(&updateReq.StorageRoot, "storage-root", updateReq.StorageRoot, `Storage root URL for the share.`) // TODO: array: updates diff --git a/cmd/workspace/storage-credentials/storage-credentials.go b/cmd/workspace/storage-credentials/storage-credentials.go index 2caf0904..4dc02806 100755 --- a/cmd/workspace/storage-credentials/storage-credentials.go +++ b/cmd/workspace/storage-credentials/storage-credentials.go @@ -360,7 +360,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `Comment associated with the credential.`) // TODO: complex arg: databricks_gcp_service_account cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if there are dependent external locations or external tables.`) - cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`) + cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `. 
Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the storage credential.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of credential.`) cmd.Flags().BoolVar(&updateReq.ReadOnly, "read-only", updateReq.ReadOnly, `Whether the storage credential is only usable for read operations.`) diff --git a/cmd/workspace/tables/tables.go b/cmd/workspace/tables/tables.go index 35775f17..1ef247b6 100755 --- a/cmd/workspace/tables/tables.go +++ b/cmd/workspace/tables/tables.go @@ -304,6 +304,7 @@ func newList() *cobra.Command { cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of tables to return.`) cmd.Flags().BoolVar(&listReq.OmitColumns, "omit-columns", listReq.OmitColumns, `Whether to omit the columns of the table from the response or not.`) cmd.Flags().BoolVar(&listReq.OmitProperties, "omit-properties", listReq.OmitProperties, `Whether to omit the properties of the table from the response or not.`) + cmd.Flags().BoolVar(&listReq.OmitUsername, "omit-username", listReq.OmitUsername, `Whether to omit the username of the table (e.g.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque token to send for the next page of results (pagination).`) cmd.Use = "list CATALOG_NAME SCHEMA_NAME" diff --git a/cmd/workspace/token-management/token-management.go b/cmd/workspace/token-management/token-management.go index 6deb8d12..c8d57fd6 100755 --- a/cmd/workspace/token-management/token-management.go +++ b/cmd/workspace/token-management/token-management.go @@ -448,8 +448,9 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set token permissions.` cmd.Long = `Set token permissions. - Sets permissions on all tokens. Tokens can inherit permissions from their root - object.` + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/users/users.go b/cmd/workspace/users/users.go index b085ab41..e787446a 100755 --- a/cmd/workspace/users/users.go +++ b/cmd/workspace/users/users.go @@ -542,8 +542,9 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set password permissions.` cmd.Long = `Set password permissions. - Sets permissions on all passwords. Passwords can inherit permissions from - their root object.` + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/warehouses/warehouses.go b/cmd/workspace/warehouses/warehouses.go index 43d6c8ab..03925bd7 100755 --- a/cmd/workspace/warehouses/warehouses.go +++ b/cmd/workspace/warehouses/warehouses.go @@ -686,8 +686,9 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set SQL warehouse permissions.` cmd.Long = `Set SQL warehouse permissions. - Sets permissions on a SQL warehouse. SQL warehouses can inherit permissions - from their root object. + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. 
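
This same doc-string update recurs across experiments, instance pools, jobs, pipelines, repos, serving endpoints, tokens, passwords, warehouses, and workspace objects: it clarifies that set-permissions is a full replacement of the object's direct ACL, not a merge. A sketch using the generic permissions API; the iam type and constant names are assumed from the Go SDK, and the object ID and group are illustrative.

```go
package main

import (
	"context"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/iam"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		panic(err)
	}

	// Set replaces the object's direct permissions wholesale: any principal
	// not listed here loses its direct grant (inherited permissions remain).
	_, err = w.Permissions.Set(ctx, iam.PermissionsRequest{
		RequestObjectType: "warehouses",
		RequestObjectId:   "1234567890abcdef", // hypothetical warehouse ID
		AccessControlList: []iam.AccessControlRequest{
			{
				GroupName:       "data-engineers",
				PermissionLevel: iam.PermissionLevelCanUse,
			},
		},
	})
	if err != nil {
		panic(err)
	}
}
```
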
Arguments: WAREHOUSE_ID: The SQL warehouse for which to get or manage permissions.` diff --git a/cmd/workspace/workspace/workspace.go b/cmd/workspace/workspace/workspace.go index 21da478c..61e1437a 100755 --- a/cmd/workspace/workspace/workspace.go +++ b/cmd/workspace/workspace/workspace.go @@ -447,6 +447,7 @@ func newImport() *cobra.Command { DBC, HTML, JUPYTER, + RAW, R_MARKDOWN, SOURCE, ]`) @@ -708,7 +709,8 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set workspace object permissions.` cmd.Long = `Set workspace object permissions. - Sets permissions on a workspace object. Workspace objects can inherit + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit permissions from their parent objects or root object. Arguments: diff --git a/go.mod b/go.mod index 697205f3..9ae5fde0 100644 --- a/go.mod +++ b/go.mod @@ -1,20 +1,20 @@ module github.com/databricks/cli -go 1.22.0 +go 1.23 -toolchain go1.22.7 +toolchain go1.23.2 require ( github.com/Masterminds/semver/v3 v3.3.0 // MIT github.com/briandowns/spinner v1.23.1 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.48.0 // Apache 2.0 - github.com/fatih/color v1.17.0 // MIT + github.com/databricks/databricks-sdk-go v0.51.0 // Apache 2.0 + github.com/fatih/color v1.18.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause github.com/hashicorp/go-version v1.7.0 // MPL 2.0 github.com/hashicorp/hc-install v0.9.0 // MPL 2.0 github.com/hashicorp/terraform-exec v0.21.0 // MPL 2.0 - github.com/hashicorp/terraform-json v0.22.1 // MPL 2.0 + github.com/hashicorp/terraform-json v0.23.0 // MPL 2.0 github.com/manifoldco/promptui v0.9.0 // BSD-3-Clause github.com/mattn/go-isatty v0.0.20 // MIT github.com/nwidger/jsoncolor v0.3.2 // MIT @@ -24,11 +24,11 @@ require ( github.com/spf13/pflag v1.0.5 // BSD-3-Clause github.com/stretchr/testify v1.9.0 // MIT golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 - golang.org/x/mod v0.21.0 - golang.org/x/oauth2 v0.23.0 - golang.org/x/sync v0.8.0 + golang.org/x/mod v0.22.0 + golang.org/x/oauth2 v0.24.0 + golang.org/x/sync v0.9.0 golang.org/x/term v0.25.0 - golang.org/x/text v0.19.0 + golang.org/x/text v0.20.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 gopkg.in/yaml.v3 v3.0.1 ) @@ -56,7 +56,7 @@ require ( github.com/mattn/go-colorable v0.1.13 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/objx v0.5.2 // indirect - github.com/zclconf/go-cty v1.14.4 // indirect + github.com/zclconf/go-cty v1.15.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect go.opentelemetry.io/otel v1.24.0 // indirect diff --git a/go.sum b/go.sum index 03698b20..2bfcfb2f 100644 --- a/go.sum +++ b/go.sum @@ -32,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.48.0 h1:46KtsnRo+FGhC3izUXbpL0PXBNomvsdignYDhJZlm9s= -github.com/databricks/databricks-sdk-go v0.48.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= +github.com/databricks/databricks-sdk-go v0.51.0 h1:tcvB9TID3oUl0O8npccB5c+33tarBiYMBFbq4U4AB6M= 
+github.com/databricks/databricks-sdk-go v0.51.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -44,8 +44,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= -github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= @@ -109,8 +109,8 @@ github.com/hashicorp/hc-install v0.9.0 h1:2dIk8LcvANwtv3QZLckxcjyF5w8KVtiMxu6G6e github.com/hashicorp/hc-install v0.9.0/go.mod h1:+6vOP+mf3tuGgMApVYtmsnDoKWMDcFXeTxCACYZ8SFg= github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ= github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg= -github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec= -github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A= +github.com/hashicorp/terraform-json v0.23.0 h1:sniCkExU4iKtTADReHzACkk8fnpQXrdD2xoR+lppBkI= +github.com/hashicorp/terraform-json v0.23.0/go.mod h1:MHdXbBAbSg0GvzuWazEGKAn/cyNfIB7mN6y7KJN6y2c= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= @@ -160,8 +160,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= -github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= -github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty v1.15.0 h1:tTCRWxsexYUmtt/wVxgDClUe+uQusuI443uL6e+5sXQ= +github.com/zclconf/go-cty v1.15.0/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= @@ -184,8 +184,8 @@ golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY golang.org/x/lint 
v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= -golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -195,13 +195,13 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -218,8 +218,8 @@ golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git 
a/internal/bundle/bundles/dashboards/databricks_template_schema.json b/internal/bundle/bundles/dashboards/databricks_template_schema.json new file mode 100644 index 00000000..1aa5728f --- /dev/null +++ b/internal/bundle/bundles/dashboards/databricks_template_schema.json @@ -0,0 +1,12 @@ +{ + "properties": { + "unique_id": { + "type": "string", + "description": "Unique ID for job name" + }, + "warehouse_id": { + "type": "string", + "description": "The SQL warehouse ID to use for the dashboard" + } + } +} diff --git a/internal/bundle/bundles/dashboards/template/dashboard.lvdash.json b/internal/bundle/bundles/dashboards/template/dashboard.lvdash.json new file mode 100644 index 00000000..397a9a12 --- /dev/null +++ b/internal/bundle/bundles/dashboards/template/dashboard.lvdash.json @@ -0,0 +1,34 @@ +{ + "pages": [ + { + "displayName": "New Page", + "layout": [ + { + "position": { + "height": 2, + "width": 6, + "x": 0, + "y": 0 + }, + "widget": { + "name": "82eb9107", + "textbox_spec": "# I'm a title" + } + }, + { + "position": { + "height": 2, + "width": 6, + "x": 0, + "y": 2 + }, + "widget": { + "name": "ffa6de4f", + "textbox_spec": "Text" + } + } + ], + "name": "fdd21a3c" + } + ] +} diff --git a/internal/bundle/bundles/dashboards/template/databricks.yml.tmpl b/internal/bundle/bundles/dashboards/template/databricks.yml.tmpl new file mode 100644 index 00000000..e7771238 --- /dev/null +++ b/internal/bundle/bundles/dashboards/template/databricks.yml.tmpl @@ -0,0 +1,12 @@ +bundle: + name: dashboards + +workspace: + root_path: "~/.bundle/{{.unique_id}}" + +resources: + dashboards: + file_reference: + display_name: test-dashboard-{{.unique_id}} + file_path: ./dashboard.lvdash.json + warehouse_id: {{.warehouse_id}} diff --git a/internal/bundle/bundles/python_wheel_task_with_cluster/databricks_template_schema.json b/internal/bundle/bundles/python_wheel_task_with_cluster/databricks_template_schema.json new file mode 100644 index 00000000..621dff6a --- /dev/null +++ b/internal/bundle/bundles/python_wheel_task_with_cluster/databricks_template_schema.json @@ -0,0 +1,25 @@ +{ + "properties": { + "project_name": { + "type": "string", + "default": "my_test_code", + "description": "Unique name for this project" + }, + "spark_version": { + "type": "string", + "description": "Spark version used for job cluster" + }, + "node_type_id": { + "type": "string", + "description": "Node type id for job cluster" + }, + "unique_id": { + "type": "string", + "description": "Unique ID for job name" + }, + "instance_pool_id": { + "type": "string", + "description": "Instance pool id for job cluster" + } + } +} diff --git a/internal/bundle/bundles/python_wheel_task_with_cluster/template/databricks.yml.tmpl b/internal/bundle/bundles/python_wheel_task_with_cluster/template/databricks.yml.tmpl new file mode 100644 index 00000000..bb2d3d7d --- /dev/null +++ b/internal/bundle/bundles/python_wheel_task_with_cluster/template/databricks.yml.tmpl @@ -0,0 +1,29 @@ +bundle: + name: wheel-task + +workspace: + root_path: "~/.bundle/{{.unique_id}}" + +resources: + clusters: + test_cluster: + cluster_name: "test-cluster-{{.unique_id}}" + spark_version: "{{.spark_version}}" + node_type_id: "{{.node_type_id}}" + num_workers: 1 + data_security_mode: USER_ISOLATION + + jobs: + some_other_job: + name: "[${bundle.target}] Test Wheel Job {{.unique_id}}" + tasks: + - task_key: TestTask + existing_cluster_id: "${resources.clusters.test_cluster.cluster_id}" + python_wheel_task: + package_name: my_test_code + entry_point: run + parameters: + - "one" + - 
"two" + libraries: + - whl: ./dist/*.whl diff --git a/internal/bundle/bundles/python_wheel_task_with_cluster/template/setup.py.tmpl b/internal/bundle/bundles/python_wheel_task_with_cluster/template/setup.py.tmpl new file mode 100644 index 00000000..b528657b --- /dev/null +++ b/internal/bundle/bundles/python_wheel_task_with_cluster/template/setup.py.tmpl @@ -0,0 +1,15 @@ +from setuptools import setup, find_packages + +import {{.project_name}} + +setup( + name="{{.project_name}}", + version={{.project_name}}.__version__, + author={{.project_name}}.__author__, + url="https://databricks.com", + author_email="john.doe@databricks.com", + description="my example wheel", + packages=find_packages(include=["{{.project_name}}"]), + entry_points={"group1": "run={{.project_name}}.__main__:main"}, + install_requires=["setuptools"], +) diff --git a/internal/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__init__.py b/internal/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__init__.py new file mode 100644 index 00000000..909f1f32 --- /dev/null +++ b/internal/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__init__.py @@ -0,0 +1,2 @@ +__version__ = "0.0.1" +__author__ = "Databricks" diff --git a/internal/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__main__.py b/internal/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__main__.py new file mode 100644 index 00000000..ea918ce2 --- /dev/null +++ b/internal/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__main__.py @@ -0,0 +1,16 @@ +""" +The entry point of the Python Wheel +""" + +import sys + + +def main(): + # This method will print the provided arguments + print("Hello from my func") + print("Got arguments:") + print(sys.argv) + + +if __name__ == "__main__": + main() diff --git a/internal/bundle/dashboards_test.go b/internal/bundle/dashboards_test.go new file mode 100644 index 00000000..3c2e27c6 --- /dev/null +++ b/internal/bundle/dashboards_test.go @@ -0,0 +1,65 @@ +package bundle + +import ( + "fmt" + "testing" + + "github.com/databricks/cli/internal/acc" + "github.com/databricks/databricks-sdk-go/service/dashboards" + "github.com/databricks/databricks-sdk-go/service/workspace" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAccDashboards(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + + warehouseID := acc.GetEnvOrSkipTest(t, "TEST_DEFAULT_WAREHOUSE_ID") + uniqueID := uuid.New().String() + root, err := initTestTemplate(t, ctx, "dashboards", map[string]any{ + "unique_id": uniqueID, + "warehouse_id": warehouseID, + }) + require.NoError(t, err) + + t.Cleanup(func() { + err = destroyBundle(t, ctx, root) + require.NoError(t, err) + }) + + err = deployBundle(t, ctx, root) + require.NoError(t, err) + + // Load bundle configuration by running the validate command. + b := unmarshalConfig(t, mustValidateBundle(t, ctx, root)) + + // Assert that the dashboard exists at the expected path and is, indeed, a dashboard. + oi, err := wt.W.Workspace.GetStatusByPath(ctx, fmt.Sprintf("%s/test-dashboard-%s.lvdash.json", b.Config.Workspace.ResourcePath, uniqueID)) + require.NoError(t, err) + assert.EqualValues(t, workspace.ObjectTypeDashboard, oi.ObjectType) + + // Load the dashboard by its ID and confirm its display name. 
+ dashboard, err := wt.W.Lakeview.GetByDashboardId(ctx, oi.ResourceId) + require.NoError(t, err) + assert.Equal(t, fmt.Sprintf("test-dashboard-%s", uniqueID), dashboard.DisplayName) + + // Make an out of band modification to the dashboard and confirm that it is detected. + _, err = wt.W.Lakeview.Update(ctx, dashboards.UpdateDashboardRequest{ + DashboardId: oi.ResourceId, + Dashboard: &dashboards.Dashboard{ + SerializedDashboard: dashboard.SerializedDashboard, + }, + }) + require.NoError(t, err) + + // Try to redeploy the bundle and confirm that the out of band modification is detected. + stdout, _, err := deployBundleWithArgs(t, ctx, root) + require.Error(t, err) + assert.Contains(t, stdout, `Error: dashboard "file_reference" has been modified remotely`+"\n") + + // Redeploy the bundle with the --force flag and confirm that the out of band modification is ignored. + _, stderr, err := deployBundleWithArgs(t, ctx, root, "--force") + require.NoError(t, err) + assert.Contains(t, stderr, `Deployment complete!`+"\n") +} diff --git a/internal/bundle/helpers.go b/internal/bundle/helpers.go index b8c81a8d..8f1a866f 100644 --- a/internal/bundle/helpers.go +++ b/internal/bundle/helpers.go @@ -11,6 +11,7 @@ import ( "strings" "testing" + "github.com/databricks/cli/bundle" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/internal" "github.com/databricks/cli/libs/cmdio" @@ -66,6 +67,19 @@ func validateBundle(t *testing.T, ctx context.Context, path string) ([]byte, err return stdout.Bytes(), err } +func mustValidateBundle(t *testing.T, ctx context.Context, path string) []byte { + data, err := validateBundle(t, ctx, path) + require.NoError(t, err) + return data +} + +func unmarshalConfig(t *testing.T, data []byte) *bundle.Bundle { + bundle := &bundle.Bundle{} + err := json.Unmarshal(data, &bundle.Config) + require.NoError(t, err) + return bundle +} + func deployBundle(t *testing.T, ctx context.Context, path string) error { ctx = env.Set(ctx, "BUNDLE_ROOT", path) c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock", "--auto-approve") @@ -73,6 +87,14 @@ func deployBundle(t *testing.T, ctx context.Context, path string) error { return err } +func deployBundleWithArgs(t *testing.T, ctx context.Context, path string, args ...string) (string, string, error) { + ctx = env.Set(ctx, "BUNDLE_ROOT", path) + args = append([]string{"bundle", "deploy"}, args...) + c := internal.NewCobraTestRunnerWithContext(t, ctx, args...) 
+ stdout, stderr, err := c.Run() + return stdout.String(), stderr.String(), err +} + func deployBundleWithFlags(t *testing.T, ctx context.Context, path string, flags []string) error { ctx = env.Set(ctx, "BUNDLE_ROOT", path) args := []string{"bundle", "deploy", "--force-lock"} diff --git a/internal/bundle/python_wheel_test.go b/internal/bundle/python_wheel_test.go index ed98efec..846f1417 100644 --- a/internal/bundle/python_wheel_test.go +++ b/internal/bundle/python_wheel_test.go @@ -5,17 +5,18 @@ import ( "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/env" "github.com/google/uuid" "github.com/stretchr/testify/require" ) -func runPythonWheelTest(t *testing.T, sparkVersion string, pythonWheelWrapper bool) { +func runPythonWheelTest(t *testing.T, templateName string, sparkVersion string, pythonWheelWrapper bool) { ctx, _ := acc.WorkspaceTest(t) nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) instancePoolId := env.Get(ctx, "TEST_INSTANCE_POOL_ID") - bundleRoot, err := initTestTemplate(t, ctx, "python_wheel_task", map[string]any{ + bundleRoot, err := initTestTemplate(t, ctx, templateName, map[string]any{ "node_type_id": nodeTypeId, "unique_id": uuid.New().String(), "spark_version": sparkVersion, @@ -45,9 +46,19 @@ func runPythonWheelTest(t *testing.T, sparkVersion string, pythonWheelWrapper bo } func TestAccPythonWheelTaskDeployAndRunWithoutWrapper(t *testing.T) { - runPythonWheelTest(t, "13.3.x-snapshot-scala2.12", false) + runPythonWheelTest(t, "python_wheel_task", "13.3.x-snapshot-scala2.12", false) } func TestAccPythonWheelTaskDeployAndRunWithWrapper(t *testing.T) { - runPythonWheelTest(t, "12.2.x-scala2.12", true) + runPythonWheelTest(t, "python_wheel_task", "12.2.x-scala2.12", true) +} + +func TestAccPythonWheelTaskDeployAndRunOnInteractiveCluster(t *testing.T) { + _, wt := acc.WorkspaceTest(t) + + if testutil.IsAWSCloud(wt.T) { + t.Skip("Skipping test for AWS cloud because it is not permitted to create clusters") + } + + runPythonWheelTest(t, "python_wheel_task_with_cluster", defaultSparkVersion, false) } diff --git a/internal/dashboard_assumptions_test.go b/internal/dashboard_assumptions_test.go index 912e046b..64294873 100644 --- a/internal/dashboard_assumptions_test.go +++ b/internal/dashboard_assumptions_test.go @@ -30,10 +30,12 @@ func TestAccDashboardAssumptions_WorkspaceImport(t *testing.T) { dir := wt.TemporaryWorkspaceDir("dashboard-assumptions-") dashboard, err := wt.W.Lakeview.Create(ctx, dashboards.CreateDashboardRequest{ - DisplayName: dashboardName, - ParentPath: dir, - SerializedDashboard: string(dashboardPayload), - WarehouseId: warehouseId, + Dashboard: &dashboards.Dashboard{ + DisplayName: dashboardName, + ParentPath: dir, + SerializedDashboard: string(dashboardPayload), + WarehouseId: warehouseId, + }, }) require.NoError(t, err) t.Logf("Dashboard ID (per Lakeview API): %s", dashboard.DashboardId) @@ -62,9 +64,11 @@ func TestAccDashboardAssumptions_WorkspaceImport(t *testing.T) { // Try to overwrite the dashboard via the Lakeview API (and expect failure). 
 {
 	_, err := wt.W.Lakeview.Create(ctx, dashboards.CreateDashboardRequest{
-		DisplayName:         dashboardName,
-		ParentPath:          dir,
-		SerializedDashboard: string(dashboardPayload),
+		Dashboard: &dashboards.Dashboard{
+			DisplayName:         dashboardName,
+			ParentPath:          dir,
+			SerializedDashboard: string(dashboardPayload),
+		},
 	})
 	require.ErrorIs(t, err, apierr.ErrResourceAlreadyExists)
 }
diff --git a/internal/filer_test.go b/internal/filer_test.go
index bc4c9480..20207d34 100644
--- a/internal/filer_test.go
+++ b/internal/filer_test.go
@@ -39,7 +39,7 @@ func (f filerTest) assertContents(ctx context.Context, name string, contents str
 	assert.Equal(f, contents, body.String())
 }
 
-func (f filerTest) assertContentsJupyter(ctx context.Context, name string) {
+func (f filerTest) assertContentsJupyter(ctx context.Context, name string, language string) {
 	reader, err := f.Read(ctx, name)
 	if !assert.NoError(f, err) {
 		return
@@ -62,6 +62,7 @@ func (f filerTest) assertContentsJupyter(ctx context.Context, name string) {
 	// Since a roundtrip to the workspace changes a Jupyter notebook's payload,
 	// the best we can do is assert that the nbformat is correct.
 	assert.EqualValues(f, 4, actual["nbformat"])
+	assert.Equal(f, language, actual["metadata"].(map[string]any)["language_info"].(map[string]any)["name"])
 }
 
 func (f filerTest) assertNotExists(ctx context.Context, name string) {
@@ -360,146 +361,114 @@ func TestAccFilerReadDir(t *testing.T) {
 	}
 }
 
-var jupyterNotebookContent1 = `
-{
-	"cells": [
-		{
-			"cell_type": "code",
-			"execution_count": null,
-			"metadata": {},
-			"outputs": [],
-			"source": [
-				"print(\"Jupyter Notebook Version 1\")"
-			]
-		}
-	],
-	"metadata": {
-		"language_info": {
-			"name": "python"
-		},
-		"orig_nbformat": 4
-	},
-	"nbformat": 4,
-	"nbformat_minor": 2
-	}
-`
-
-var jupyterNotebookContent2 = `
-{
-	"cells": [
-		{
-			"cell_type": "code",
-			"execution_count": null,
-			"metadata": {},
-			"outputs": [],
-			"source": [
-				"print(\"Jupyter Notebook Version 2\")"
-			]
-		}
-	],
-	"metadata": {
-		"language_info": {
-			"name": "python"
-		},
-		"orig_nbformat": 4
-	},
-	"nbformat": 4,
-	"nbformat_minor": 2
-	}
-`
-
-func TestAccFilerWorkspaceNotebookConflict(t *testing.T) {
+func TestAccFilerWorkspaceNotebook(t *testing.T) {
 	t.Parallel()
-	f, _ := setupWsfsFiler(t)
 	ctx := context.Background()
-	var err error
-
-	// Upload the notebooks
-	err = f.Write(ctx, "pyNb.py", strings.NewReader("# Databricks notebook source\nprint('first upload'))"))
-	require.NoError(t, err)
-	err = f.Write(ctx, "rNb.r", strings.NewReader("# Databricks notebook source\nprint('first upload'))"))
-	require.NoError(t, err)
-	err = f.Write(ctx, "sqlNb.sql", strings.NewReader("-- Databricks notebook source\n SELECT \"first upload\""))
-	require.NoError(t, err)
-	err = f.Write(ctx, "scalaNb.scala", strings.NewReader("// Databricks notebook source\n println(\"first upload\"))"))
-	require.NoError(t, err)
-	err = f.Write(ctx, "jupyterNb.ipynb", strings.NewReader(jupyterNotebookContent1))
-	require.NoError(t, err)
+	tcases := []struct {
+		name           string
+		nameWithoutExt string
+		content1       string
+		expected1      string
+		content2       string
+		expected2      string
+	}{
+		{
+			name:           "pyNb.py",
+			nameWithoutExt: "pyNb",
+			content1:       "# Databricks notebook source\nprint('first upload')",
+			expected1:      "# Databricks notebook source\nprint('first upload')",
+			content2:       "# Databricks notebook source\nprint('second upload')",
+			expected2:      "# Databricks notebook source\nprint('second upload')",
+		},
+		{
+			name:           "rNb.r",
+			nameWithoutExt: "rNb",
+			content1:       "# Databricks notebook source\nprint('first upload')",
+			expected1:      "# Databricks notebook source\nprint('first upload')",
+			content2:       "# Databricks notebook source\nprint('second upload')",
+			expected2:      "# Databricks notebook source\nprint('second upload')",
		},
+		{
+			name:           "sqlNb.sql",
+			nameWithoutExt: "sqlNb",
+			content1:       "-- Databricks notebook source\n SELECT \"first upload\"",
+			expected1:      "-- Databricks notebook source\n SELECT \"first upload\"",
+			content2:       "-- Databricks notebook source\n SELECT \"second upload\"",
+			expected2:      "-- Databricks notebook source\n SELECT \"second upload\"",
+		},
+		{
+			name:           "scalaNb.scala",
+			nameWithoutExt: "scalaNb",
+			content1:       "// Databricks notebook source\n println(\"first upload\")",
+			expected1:      "// Databricks notebook source\n println(\"first upload\")",
+			content2:       "// Databricks notebook source\n println(\"second upload\")",
+			expected2:      "// Databricks notebook source\n println(\"second upload\")",
+		},
+		{
+			name:           "pythonJupyterNb.ipynb",
+			nameWithoutExt: "pythonJupyterNb",
+			content1:       readFile(t, "testdata/notebooks/py1.ipynb"),
+			expected1:      "# Databricks notebook source\nprint(1)",
+			content2:       readFile(t, "testdata/notebooks/py2.ipynb"),
+			expected2:      "# Databricks notebook source\nprint(2)",
+		},
+		{
+			name:           "rJupyterNb.ipynb",
+			nameWithoutExt: "rJupyterNb",
+			content1:       readFile(t, "testdata/notebooks/r1.ipynb"),
+			expected1:      "# Databricks notebook source\nprint(1)",
+			content2:       readFile(t, "testdata/notebooks/r2.ipynb"),
+			expected2:      "# Databricks notebook source\nprint(2)",
+		},
+		{
+			name:           "scalaJupyterNb.ipynb",
+			nameWithoutExt: "scalaJupyterNb",
+			content1:       readFile(t, "testdata/notebooks/scala1.ipynb"),
+			expected1:      "// Databricks notebook source\nprintln(1)",
+			content2:       readFile(t, "testdata/notebooks/scala2.ipynb"),
+			expected2:      "// Databricks notebook source\nprintln(2)",
+		},
+		{
+			name:           "sqlJupyterNotebook.ipynb",
+			nameWithoutExt: "sqlJupyterNotebook",
+			content1:       readFile(t, "testdata/notebooks/sql1.ipynb"),
+			expected1:      "-- Databricks notebook source\nselect 1",
+			content2:       readFile(t, "testdata/notebooks/sql2.ipynb"),
+			expected2:      "-- Databricks notebook source\nselect 2",
+		},
+	}
 
-	// Assert contents after initial upload
-	filerTest{t, f}.assertContents(ctx, "pyNb", "# Databricks notebook source\nprint('first upload'))")
-	filerTest{t, f}.assertContents(ctx, "rNb", "# Databricks notebook source\nprint('first upload'))")
-	filerTest{t, f}.assertContents(ctx, "sqlNb", "-- Databricks notebook source\n SELECT \"first upload\"")
-	filerTest{t, f}.assertContents(ctx, "scalaNb", "// Databricks notebook source\n println(\"first upload\"))")
-	filerTest{t, f}.assertContents(ctx, "jupyterNb", "# Databricks notebook source\nprint(\"Jupyter Notebook Version 1\")")
+	for _, tc := range tcases {
+		f, _ := setupWsfsFiler(t)
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
 
-	// Assert uploading a second time fails due to overwrite mode missing
-	err = f.Write(ctx, "pyNb.py", strings.NewReader("# Databricks notebook source\nprint('second upload'))"))
-	assert.ErrorIs(t, err, fs.ErrExist)
-	assert.Regexp(t, regexp.MustCompile(`file already exists: .*/pyNb$`), err.Error())
+			// Upload the notebook. Declare err locally so that the parallel
+			// subtests do not race on a variable shared across closures.
+			err := f.Write(ctx, tc.name, strings.NewReader(tc.content1))
+			require.NoError(t, err)
 
-	err = f.Write(ctx, "rNb.r", strings.NewReader("# Databricks notebook source\nprint('second upload'))"))
-	assert.ErrorIs(t, err, fs.ErrExist)
-	assert.Regexp(t, regexp.MustCompile(`file already exists: .*/rNb$`), err.Error())
+			// Assert contents after the initial upload. Note that we expect the
			// content for Jupyter notebooks to be in source format because the
			// workspace files client always uses the source format to read
			// notebooks from the workspace.
+			filerTest{t, f}.assertContents(ctx, tc.nameWithoutExt, tc.expected1)
 
-	err = f.Write(ctx, "sqlNb.sql", strings.NewReader("# Databricks notebook source\n SELECT \"second upload\")"))
-	assert.ErrorIs(t, err, fs.ErrExist)
-	assert.Regexp(t, regexp.MustCompile(`file already exists: .*/sqlNb$`), err.Error())
+			// Assert uploading a second time fails due to overwrite mode missing
+			err = f.Write(ctx, tc.name, strings.NewReader(tc.content2))
+			assert.ErrorIs(t, err, fs.ErrExist)
+			assert.Regexp(t, regexp.MustCompile(`file already exists: .*/`+tc.nameWithoutExt+`$`), err.Error())
 
-	err = f.Write(ctx, "scalaNb.scala", strings.NewReader("# Databricks notebook source\n println(\"second upload\"))"))
-	assert.ErrorIs(t, err, fs.ErrExist)
-	assert.Regexp(t, regexp.MustCompile(`file already exists: .*/scalaNb$`), err.Error())
+			// Try uploading the notebook again with the overwrite flag. This time it should succeed.
+			err = f.Write(ctx, tc.name, strings.NewReader(tc.content2), filer.OverwriteIfExists)
+			require.NoError(t, err)
 
-	err = f.Write(ctx, "jupyterNb.ipynb", strings.NewReader(jupyterNotebookContent2))
-	assert.ErrorIs(t, err, fs.ErrExist)
-	assert.Regexp(t, regexp.MustCompile(`file already exists: .*/jupyterNb$`), err.Error())
-}
+			// Assert contents after the second upload
+			filerTest{t, f}.assertContents(ctx, tc.nameWithoutExt, tc.expected2)
+		})
+	}
 
-func TestAccFilerWorkspaceNotebookWithOverwriteFlag(t *testing.T) {
-	t.Parallel()
-
-	f, _ := setupWsfsFiler(t)
-	ctx := context.Background()
-	var err error
-
-	// Upload notebooks
-	err = f.Write(ctx, "pyNb.py", strings.NewReader("# Databricks notebook source\nprint('first upload'))"))
-	require.NoError(t, err)
-	err = f.Write(ctx, "rNb.r", strings.NewReader("# Databricks notebook source\nprint('first upload'))"))
-	require.NoError(t, err)
-	err = f.Write(ctx, "sqlNb.sql", strings.NewReader("-- Databricks notebook source\n SELECT \"first upload\""))
-	require.NoError(t, err)
-	err = f.Write(ctx, "scalaNb.scala", strings.NewReader("// Databricks notebook source\n println(\"first upload\"))"))
-	require.NoError(t, err)
-	err = f.Write(ctx, "jupyterNb.ipynb", strings.NewReader(jupyterNotebookContent1))
-	require.NoError(t, err)
-
-	// Assert contents after initial upload
-	filerTest{t, f}.assertContents(ctx, "pyNb", "# Databricks notebook source\nprint('first upload'))")
-	filerTest{t, f}.assertContents(ctx, "rNb", "# Databricks notebook source\nprint('first upload'))")
-	filerTest{t, f}.assertContents(ctx, "sqlNb", "-- Databricks notebook source\n SELECT \"first upload\"")
-	filerTest{t, f}.assertContents(ctx, "scalaNb", "// Databricks notebook source\n println(\"first upload\"))")
-	filerTest{t, f}.assertContents(ctx, "jupyterNb", "# Databricks notebook source\nprint(\"Jupyter Notebook Version 1\")")
-
-	// Upload notebooks a second time, overwriting the initial uplaods
-	err = f.Write(ctx, "pyNb.py", strings.NewReader("# Databricks notebook source\nprint('second upload'))"), filer.OverwriteIfExists)
-	require.NoError(t, err)
-	err = f.Write(ctx, "rNb.r", strings.NewReader("# Databricks notebook source\nprint('second upload'))"), filer.OverwriteIfExists)
-	require.NoError(t, err)
-	err = f.Write(ctx, "sqlNb.sql", strings.NewReader("-- Databricks notebook source\n SELECT \"second upload\""), filer.OverwriteIfExists)
-	require.NoError(t, err)
-	err = f.Write(ctx,
"scalaNb.scala", strings.NewReader("// Databricks notebook source\n println(\"second upload\"))"), filer.OverwriteIfExists) - require.NoError(t, err) - err = f.Write(ctx, "jupyterNb.ipynb", strings.NewReader(jupyterNotebookContent2), filer.OverwriteIfExists) - require.NoError(t, err) - - // Assert contents have been overwritten - filerTest{t, f}.assertContents(ctx, "pyNb", "# Databricks notebook source\nprint('second upload'))") - filerTest{t, f}.assertContents(ctx, "rNb", "# Databricks notebook source\nprint('second upload'))") - filerTest{t, f}.assertContents(ctx, "sqlNb", "-- Databricks notebook source\n SELECT \"second upload\"") - filerTest{t, f}.assertContents(ctx, "scalaNb", "// Databricks notebook source\n println(\"second upload\"))") - filerTest{t, f}.assertContents(ctx, "jupyterNb", "# Databricks notebook source\nprint(\"Jupyter Notebook Version 2\")") } func TestAccFilerWorkspaceFilesExtensionsReadDir(t *testing.T) { @@ -515,11 +484,13 @@ func TestAccFilerWorkspaceFilesExtensionsReadDir(t *testing.T) { {"foo.r", "print('foo')"}, {"foo.scala", "println('foo')"}, {"foo.sql", "SELECT 'foo'"}, - {"jupyterNb.ipynb", jupyterNotebookContent1}, - {"jupyterNb2.ipynb", jupyterNotebookContent2}, + {"py1.ipynb", readFile(t, "testdata/notebooks/py1.ipynb")}, {"pyNb.py", "# Databricks notebook source\nprint('first upload'))"}, + {"r1.ipynb", readFile(t, "testdata/notebooks/r1.ipynb")}, {"rNb.r", "# Databricks notebook source\nprint('first upload'))"}, + {"scala1.ipynb", readFile(t, "testdata/notebooks/scala1.ipynb")}, {"scalaNb.scala", "// Databricks notebook source\n println(\"first upload\"))"}, + {"sql1.ipynb", readFile(t, "testdata/notebooks/sql1.ipynb")}, {"sqlNb.sql", "-- Databricks notebook source\n SELECT \"first upload\""}, } @@ -554,11 +525,13 @@ func TestAccFilerWorkspaceFilesExtensionsReadDir(t *testing.T) { "foo.r", "foo.scala", "foo.sql", - "jupyterNb.ipynb", - "jupyterNb2.ipynb", + "py1.ipynb", "pyNb.py", + "r1.ipynb", "rNb.r", + "scala1.ipynb", "scalaNb.scala", + "sql1.ipynb", "sqlNb.sql", }, names) @@ -582,7 +555,10 @@ func setupFilerWithExtensionsTest(t *testing.T) filer.Filer { }{ {"foo.py", "# Databricks notebook source\nprint('first upload'))"}, {"bar.py", "print('foo')"}, - {"jupyter.ipynb", jupyterNotebookContent1}, + {"p1.ipynb", readFile(t, "testdata/notebooks/py1.ipynb")}, + {"r1.ipynb", readFile(t, "testdata/notebooks/r1.ipynb")}, + {"scala1.ipynb", readFile(t, "testdata/notebooks/scala1.ipynb")}, + {"sql1.ipynb", readFile(t, "testdata/notebooks/sql1.ipynb")}, {"pretender", "not a notebook"}, {"dir/file.txt", "file content"}, {"scala-notebook.scala", "// Databricks notebook source\nprintln('first upload')"}, @@ -608,11 +584,15 @@ func TestAccFilerWorkspaceFilesExtensionsRead(t *testing.T) { // Read contents of test fixtures as a sanity check. 
filerTest{t, wf}.assertContents(ctx, "foo.py", "# Databricks notebook source\nprint('first upload'))") filerTest{t, wf}.assertContents(ctx, "bar.py", "print('foo')") - filerTest{t, wf}.assertContentsJupyter(ctx, "jupyter.ipynb") filerTest{t, wf}.assertContents(ctx, "dir/file.txt", "file content") filerTest{t, wf}.assertContents(ctx, "scala-notebook.scala", "// Databricks notebook source\nprintln('first upload')") filerTest{t, wf}.assertContents(ctx, "pretender", "not a notebook") + filerTest{t, wf}.assertContentsJupyter(ctx, "p1.ipynb", "python") + filerTest{t, wf}.assertContentsJupyter(ctx, "r1.ipynb", "r") + filerTest{t, wf}.assertContentsJupyter(ctx, "scala1.ipynb", "scala") + filerTest{t, wf}.assertContentsJupyter(ctx, "sql1.ipynb", "sql") + // Read non-existent file _, err := wf.Read(ctx, "non-existent.py") assert.ErrorIs(t, err, fs.ErrNotExist) @@ -638,35 +618,41 @@ func TestAccFilerWorkspaceFilesExtensionsDelete(t *testing.T) { ctx := context.Background() wf := setupFilerWithExtensionsTest(t) - // Delete notebook - err := wf.Delete(ctx, "foo.py") - require.NoError(t, err) - filerTest{t, wf}.assertNotExists(ctx, "foo.py") + for _, fileName := range []string{ + // notebook + "foo.py", + // file + "bar.py", + // python jupyter notebook + "p1.ipynb", + // R jupyter notebook + "r1.ipynb", + // Scala jupyter notebook + "scala1.ipynb", + // SQL jupyter notebook + "sql1.ipynb", + } { + err := wf.Delete(ctx, fileName) + require.NoError(t, err) + filerTest{t, wf}.assertNotExists(ctx, fileName) + } - // Delete file - err = wf.Delete(ctx, "bar.py") - require.NoError(t, err) - filerTest{t, wf}.assertNotExists(ctx, "bar.py") - - // Delete jupyter notebook - err = wf.Delete(ctx, "jupyter.ipynb") - require.NoError(t, err) - filerTest{t, wf}.assertNotExists(ctx, "jupyter.ipynb") - - // Delete non-existent file - err = wf.Delete(ctx, "non-existent.py") - assert.ErrorIs(t, err, fs.ErrNotExist) - - // Ensure we do not delete a file as a notebook - err = wf.Delete(ctx, "pretender.py") - assert.ErrorIs(t, err, fs.ErrNotExist) - - // Ensure we do not delete a Scala notebook as a Python notebook - _, err = wf.Read(ctx, "scala-notebook.py") - assert.ErrorIs(t, err, fs.ErrNotExist) + for _, fileName := range []string{ + // do not delete non-existent file + "non-existent.py", + // do not delete a file assuming it is a notebook and stripping the extension + "pretender.py", + // do not delete a Scala notebook as a Python notebook + "scala-notebook.py", + // do not delete a file assuming it is a Jupyter notebook and stripping the extension + "pretender.ipynb", + } { + err := wf.Delete(ctx, fileName) + assert.ErrorIs(t, err, fs.ErrNotExist) + } // Delete directory - err = wf.Delete(ctx, "dir") + err := wf.Delete(ctx, "dir") assert.ErrorIs(t, err, fs.ErrInvalid) // Delete directory recursively @@ -681,44 +667,45 @@ func TestAccFilerWorkspaceFilesExtensionsStat(t *testing.T) { ctx := context.Background() wf := setupFilerWithExtensionsTest(t) - // Stat on a notebook - info, err := wf.Stat(ctx, "foo.py") - require.NoError(t, err) - assert.Equal(t, "foo.py", info.Name()) - assert.False(t, info.IsDir()) - - // Stat on a file - info, err = wf.Stat(ctx, "bar.py") - require.NoError(t, err) - assert.Equal(t, "bar.py", info.Name()) - assert.False(t, info.IsDir()) - - // Stat on a Jupyter notebook - info, err = wf.Stat(ctx, "jupyter.ipynb") - require.NoError(t, err) - assert.Equal(t, "jupyter.ipynb", info.Name()) - assert.False(t, info.IsDir()) + for _, fileName := range []string{ + // notebook + "foo.py", + // file + 
"bar.py", + // python jupyter notebook + "p1.ipynb", + // R jupyter notebook + "r1.ipynb", + // Scala jupyter notebook + "scala1.ipynb", + // SQL jupyter notebook + "sql1.ipynb", + } { + info, err := wf.Stat(ctx, fileName) + require.NoError(t, err) + assert.Equal(t, fileName, info.Name()) + assert.False(t, info.IsDir()) + } // Stat on a directory - info, err = wf.Stat(ctx, "dir") + info, err := wf.Stat(ctx, "dir") require.NoError(t, err) assert.Equal(t, "dir", info.Name()) assert.True(t, info.IsDir()) - // Stat on a non-existent file - _, err = wf.Stat(ctx, "non-existent.py") - assert.ErrorIs(t, err, fs.ErrNotExist) - - // Ensure we do not stat a file as a notebook - _, err = wf.Stat(ctx, "pretender.py") - assert.ErrorIs(t, err, fs.ErrNotExist) - - // Ensure we do not stat a Scala notebook as a Python notebook - _, err = wf.Stat(ctx, "scala-notebook.py") - assert.ErrorIs(t, err, fs.ErrNotExist) - - _, err = wf.Stat(ctx, "pretender.ipynb") - assert.ErrorIs(t, err, fs.ErrNotExist) + for _, fileName := range []string{ + // non-existent file + "non-existent.py", + // do not stat a file assuming it is a notebook and stripping the extension + "pretender.py", + // do not stat a Scala notebook as a Python notebook + "scala-notebook.py", + // do not read a regular file assuming it is a Jupyter notebook and stripping the extension + "pretender.ipynb", + } { + _, err := wf.Stat(ctx, fileName) + assert.ErrorIs(t, err, fs.ErrNotExist) + } } func TestAccWorkspaceFilesExtensionsDirectoriesAreNotNotebooks(t *testing.T) { @@ -739,32 +726,115 @@ func TestAccWorkspaceFilesExtensionsDirectoriesAreNotNotebooks(t *testing.T) { func TestAccWorkspaceFilesExtensions_ExportFormatIsPreserved(t *testing.T) { t.Parallel() - ctx := context.Background() - wf, _ := setupWsfsExtensionsFiler(t) + // Case 1: Writing source notebooks. + for _, tc := range []struct { + language string + sourceName string + sourceContent string + jupyterName string + jupyterContent string + }{ + { + language: "python", + sourceName: "foo.py", + sourceContent: "# Databricks notebook source\nprint('foo')", + jupyterName: "foo.ipynb", + }, + { + language: "r", + sourceName: "foo.r", + sourceContent: "# Databricks notebook source\nprint('foo')", + jupyterName: "foo.ipynb", + }, + { + language: "scala", + sourceName: "foo.scala", + sourceContent: "// Databricks notebook source\nprintln('foo')", + jupyterName: "foo.ipynb", + }, + { + language: "sql", + sourceName: "foo.sql", + sourceContent: "-- Databricks notebook source\nselect 'foo'", + jupyterName: "foo.ipynb", + }, + } { + t.Run("source_"+tc.language, func(t *testing.T) { + t.Parallel() - // Case 1: Source Notebook - err := wf.Write(ctx, "foo.py", strings.NewReader("# Databricks notebook source\nprint('foo')")) - require.NoError(t, err) + ctx := context.Background() + wf, _ := setupWsfsExtensionsFiler(t) - // The source notebook should exist but not the Jupyter notebook - filerTest{t, wf}.assertContents(ctx, "foo.py", "# Databricks notebook source\nprint('foo')") - _, err = wf.Stat(ctx, "foo.ipynb") - assert.ErrorIs(t, err, fs.ErrNotExist) - _, err = wf.Read(ctx, "foo.ipynb") - assert.ErrorIs(t, err, fs.ErrNotExist) - err = wf.Delete(ctx, "foo.ipynb") - assert.ErrorIs(t, err, fs.ErrNotExist) + err := wf.Write(ctx, tc.sourceName, strings.NewReader(tc.sourceContent)) + require.NoError(t, err) - // Case 2: Jupyter Notebook - err = wf.Write(ctx, "bar.ipynb", strings.NewReader(jupyterNotebookContent1)) - require.NoError(t, err) + // Assert on the content of the source notebook that's been written. 
+ filerTest{t, wf}.assertContents(ctx, tc.sourceName, tc.sourceContent) - // The Jupyter notebook should exist but not the source notebook - filerTest{t, wf}.assertContentsJupyter(ctx, "bar.ipynb") - _, err = wf.Stat(ctx, "bar.py") - assert.ErrorIs(t, err, fs.ErrNotExist) - _, err = wf.Read(ctx, "bar.py") - assert.ErrorIs(t, err, fs.ErrNotExist) - err = wf.Delete(ctx, "bar.py") - assert.ErrorIs(t, err, fs.ErrNotExist) + // Ensure that the source notebook is not read when the name contains + // the .ipynb extension. + _, err = wf.Stat(ctx, tc.jupyterName) + assert.ErrorIs(t, err, fs.ErrNotExist) + _, err = wf.Read(ctx, tc.jupyterName) + assert.ErrorIs(t, err, fs.ErrNotExist) + err = wf.Delete(ctx, tc.jupyterName) + assert.ErrorIs(t, err, fs.ErrNotExist) + }) + } + + // Case 2: Writing Jupyter notebooks. + for _, tc := range []struct { + language string + sourceName string + jupyterName string + jupyterContent string + }{ + { + language: "python", + sourceName: "foo.py", + jupyterName: "foo.ipynb", + jupyterContent: readFile(t, "testdata/notebooks/py1.ipynb"), + }, + { + language: "r", + sourceName: "foo.r", + jupyterName: "foo.ipynb", + jupyterContent: readFile(t, "testdata/notebooks/r1.ipynb"), + }, + { + language: "scala", + sourceName: "foo.scala", + jupyterName: "foo.ipynb", + jupyterContent: readFile(t, "testdata/notebooks/scala1.ipynb"), + }, + { + language: "sql", + sourceName: "foo.sql", + jupyterName: "foo.ipynb", + jupyterContent: readFile(t, "testdata/notebooks/sql1.ipynb"), + }, + } { + t.Run("jupyter_"+tc.language, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + wf, _ := setupWsfsExtensionsFiler(t) + + err := wf.Write(ctx, tc.jupyterName, strings.NewReader(tc.jupyterContent)) + require.NoError(t, err) + + // Assert that the written notebook is jupyter and has the correct + // language_info metadata set. + filerTest{t, wf}.assertContentsJupyter(ctx, tc.jupyterName, tc.language) + + // Ensure that the Jupyter notebook is not read when the name does not + // contain the .ipynb extension. + _, err = wf.Stat(ctx, tc.sourceName) + assert.ErrorIs(t, err, fs.ErrNotExist) + _, err = wf.Read(ctx, tc.sourceName) + assert.ErrorIs(t, err, fs.ErrNotExist) + err = wf.Delete(ctx, tc.sourceName) + assert.ErrorIs(t, err, fs.ErrNotExist) + }) + } } diff --git a/internal/helpers.go b/internal/helpers.go index 3bf38775..3e4b4e97 100644 --- a/internal/helpers.go +++ b/internal/helpers.go @@ -352,6 +352,13 @@ func RequireErrorRun(t *testing.T, args ...string) (bytes.Buffer, bytes.Buffer, return stdout, stderr, err } +func readFile(t *testing.T, name string) string { + b, err := os.ReadFile(name) + require.NoError(t, err) + + return string(b) +} + func writeFile(t *testing.T, name string, body string) string { f, err := os.Create(filepath.Join(t.TempDir(), name)) require.NoError(t, err) @@ -562,12 +569,10 @@ func setupLocalFiler(t *testing.T) (filer.Filer, string) { } func setupWsfsFiler(t *testing.T) (filer.Filer, string) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + ctx, wt := acc.WorkspaceTest(t) - ctx := context.Background() - w := databricks.Must(databricks.NewWorkspaceClient()) - tmpdir := TemporaryWorkspaceDir(t, w) - f, err := filer.NewWorkspaceFilesClient(w, tmpdir) + tmpdir := TemporaryWorkspaceDir(t, wt.W) + f, err := filer.NewWorkspaceFilesClient(wt.W, tmpdir) require.NoError(t, err) // Check if we can use this API here, skip test if we cannot. 
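A note on the language_info assertion that this change adds to assertContentsJupyter: it digs the language name out of the decoded notebook with chained type assertions (actual["metadata"].(map[string]any)["language_info"].(map[string]any)["name"]), which panic, and thereby fail the test, if any level is missing or mistyped. That is fine for a test against known fixtures; a defensive variant would thread ok flags through instead. A minimal sketch of such a helper (hypothetical, not part of this change):

	// languageInfoName extracts metadata.language_info.name from a decoded
	// Jupyter notebook, returning false instead of panicking when a level is
	// absent or has an unexpected type.
	func languageInfoName(doc map[string]any) (string, bool) {
		metadata, ok := doc["metadata"].(map[string]any)
		if !ok {
			return "", false
		}
		languageInfo, ok := metadata["language_info"].(map[string]any)
		if !ok {
			return "", false
		}
		name, ok := languageInfo["name"].(string)
		return name, ok
	}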
@@ -581,11 +586,10 @@ func setupWsfsFiler(t *testing.T) (filer.Filer, string) { } func setupWsfsExtensionsFiler(t *testing.T) (filer.Filer, string) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + _, wt := acc.WorkspaceTest(t) - w := databricks.Must(databricks.NewWorkspaceClient()) - tmpdir := TemporaryWorkspaceDir(t, w) - f, err := filer.NewWorkspaceFilesExtensionsClient(w, tmpdir) + tmpdir := TemporaryWorkspaceDir(t, wt.W) + f, err := filer.NewWorkspaceFilesExtensionsClient(wt.W, tmpdir) require.NoError(t, err) return f, tmpdir diff --git a/internal/testdata/notebooks/py1.ipynb b/internal/testdata/notebooks/py1.ipynb new file mode 100644 index 00000000..0a44ce0e --- /dev/null +++ b/internal/testdata/notebooks/py1.ipynb @@ -0,0 +1,27 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(1)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.8.13" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/internal/testdata/notebooks/py2.ipynb b/internal/testdata/notebooks/py2.ipynb new file mode 100644 index 00000000..8b2ccde1 --- /dev/null +++ b/internal/testdata/notebooks/py2.ipynb @@ -0,0 +1,27 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(2)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.8.13" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/internal/testdata/notebooks/r1.ipynb b/internal/testdata/notebooks/r1.ipynb new file mode 100644 index 00000000..6280426a --- /dev/null +++ b/internal/testdata/notebooks/r1.ipynb @@ -0,0 +1,25 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(1)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "R", + "language": "R", + "name": "ir" + }, + "language_info": { + "name": "R" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/internal/testdata/notebooks/r2.ipynb b/internal/testdata/notebooks/r2.ipynb new file mode 100644 index 00000000..f2ff413d --- /dev/null +++ b/internal/testdata/notebooks/r2.ipynb @@ -0,0 +1,29 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "vscode": { + "languageId": "r" + } + }, + "outputs": [], + "source": [ + "print(2)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "R", + "language": "R", + "name": "ir" + }, + "language_info": { + "name": "R" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/internal/testdata/notebooks/scala1.ipynb b/internal/testdata/notebooks/scala1.ipynb new file mode 100644 index 00000000..25a5a187 --- /dev/null +++ b/internal/testdata/notebooks/scala1.ipynb @@ -0,0 +1,38 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1\n" + ] + } + ], + "source": [ + "println(1)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Scala", + "language": "scala", + "name": "scala" + }, + "language_info": { + "codemirror_mode": "text/x-scala", + "file_extension": ".sc", + "mimetype": "text/x-scala", + "name": "scala", + 
"nbconvert_exporter": "script", + "version": "2.13.14" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/internal/testdata/notebooks/scala2.ipynb b/internal/testdata/notebooks/scala2.ipynb new file mode 100644 index 00000000..353fc29f --- /dev/null +++ b/internal/testdata/notebooks/scala2.ipynb @@ -0,0 +1,38 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1\n" + ] + } + ], + "source": [ + "println(2)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Scala", + "language": "scala", + "name": "scala" + }, + "language_info": { + "codemirror_mode": "text/x-scala", + "file_extension": ".sc", + "mimetype": "text/x-scala", + "name": "scala", + "nbconvert_exporter": "script", + "version": "2.13.14" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/internal/testdata/notebooks/sql1.ipynb b/internal/testdata/notebooks/sql1.ipynb new file mode 100644 index 00000000..7a3562a1 --- /dev/null +++ b/internal/testdata/notebooks/sql1.ipynb @@ -0,0 +1,20 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "select 1" + ] + } + ], + "metadata": { + "language_info": { + "name": "sql" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/internal/testdata/notebooks/sql2.ipynb b/internal/testdata/notebooks/sql2.ipynb new file mode 100644 index 00000000..7780e1da --- /dev/null +++ b/internal/testdata/notebooks/sql2.ipynb @@ -0,0 +1,20 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "select 2" + ] + } + ], + "metadata": { + "language_info": { + "name": "sql" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/libs/auth/oauth.go b/libs/auth/oauth.go index 7c1cb957..026c4546 100644 --- a/libs/auth/oauth.go +++ b/libs/auth/oauth.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "net" + "net/url" "strings" "time" @@ -143,6 +144,26 @@ func (a *PersistentAuth) Challenge(ctx context.Context) error { return nil } +// This function cleans up the host URL by only retaining the scheme and the host. +// This function thus removes any path, query arguments, or fragments from the URL. +func (a *PersistentAuth) cleanHost() { + parsedHost, err := url.Parse(a.Host) + if err != nil { + return + } + // when either host or scheme is empty, we don't want to clean it. This is because + // the Go url library parses a raw "abc" string as the path of a URL and cleaning + // it will return thus return an empty string. + if parsedHost.Host == "" || parsedHost.Scheme == "" { + return + } + host := url.URL{ + Scheme: parsedHost.Scheme, + Host: parsedHost.Host, + } + a.Host = host.String() +} + func (a *PersistentAuth) init(ctx context.Context) error { if a.Host == "" && a.AccountID == "" { return ErrFetchCredentials @@ -156,6 +177,9 @@ func (a *PersistentAuth) init(ctx context.Context) error { if a.browser == nil { a.browser = browser.OpenURL } + + a.cleanHost() + // try acquire listener, which we also use as a machine-local // exclusive lock to prevent token cache corruption in the scope // of developer machine, where this command runs. 
diff --git a/libs/auth/oauth_test.go b/libs/auth/oauth_test.go
index ea6a8061..fdf0d04b 100644
--- a/libs/auth/oauth_test.go
+++ b/libs/auth/oauth_test.go
@@ -228,3 +228,37 @@ func TestChallengeFailed(t *testing.T) {
 		assert.EqualError(t, err, "authorize: access_denied: Policy evaluation failed for this request")
 	})
 }
+
+func TestPersistentAuthCleanHost(t *testing.T) {
+	for _, tc := range []struct {
+		in  string
+		out string
+	}{
+		{"https://example.com", "https://example.com"},
+		{"https://example.com/", "https://example.com"},
+		{"https://example.com/path", "https://example.com"},
+		{"https://example.com/path/subpath", "https://example.com"},
+		{"https://example.com/path?query=1", "https://example.com"},
+		{"https://example.com/path?query=1&other=2", "https://example.com"},
+		{"https://example.com/path#fragment", "https://example.com"},
+		{"https://example.com/path?query=1#fragment", "https://example.com"},
+		{"https://example.com/path?query=1&other=2#fragment", "https://example.com"},
+		{"https://example.com/path/subpath?query=1", "https://example.com"},
+		{"https://example.com/path/subpath?query=1&other=2", "https://example.com"},
+		{"https://example.com/path/subpath#fragment", "https://example.com"},
+		{"https://example.com/path/subpath?query=1#fragment", "https://example.com"},
+		{"https://example.com/path/subpath?query=1&other=2#fragment", "https://example.com"},
+		{"https://example.com/path?query=1%20value&other=2%20value", "https://example.com"},
+		{"http://example.com/path/subpath?query=1%20value&other=2%20value", "http://example.com"},
+
+		// URLs without a scheme should be left as-is
+		{"abc", "abc"},
+		{"abc.com/def", "abc.com/def"},
+	} {
+		p := &PersistentAuth{
+			Host: tc.in,
+		}
+		p.cleanHost()
+		assert.Equal(t, tc.out, p.Host)
+	}
+}
diff --git a/libs/dbr/context.go b/libs/dbr/context.go
new file mode 100644
index 00000000..7512c0fe
--- /dev/null
+++ b/libs/dbr/context.go
@@ -0,0 +1,49 @@
+package dbr
+
+import "context"
+
+// key is a package-local type to use for context keys.
+//
+// Using an unexported type for context keys prevents key collisions across
+// packages since external packages cannot create values of this type.
+type key int
+
+const (
+	// dbrKey is the context key for the detection result.
+	// The value of 1 is arbitrary and can be any number.
+	// Other keys in the same package must have different values.
+	dbrKey = key(1)
+)
+
+// DetectRuntime detects whether or not the current
+// process is running inside a Databricks Runtime environment.
+// It returns a new context with the detection result set.
+func DetectRuntime(ctx context.Context) context.Context {
+	if v := ctx.Value(dbrKey); v != nil {
+		panic("dbr.DetectRuntime called twice on the same context")
+	}
+	return context.WithValue(ctx, dbrKey, detect(ctx))
+}
+
+// MockRuntime is a helper function to mock the detection result.
+// It returns a new context with the detection result set.
+func MockRuntime(ctx context.Context, b bool) context.Context {
+	if v := ctx.Value(dbrKey); v != nil {
+		panic("dbr.MockRuntime called twice on the same context")
+	}
+	return context.WithValue(ctx, dbrKey, b)
+}
+
+// RunsOnRuntime returns the detection result from the context.
+// It expects a context returned by [DetectRuntime] or [MockRuntime].
+//
+// We store this value in a context to avoid having to use either
+// a global variable, passing a boolean around everywhere, or
+// performing the same detection multiple times.
+func RunsOnRuntime(ctx context.Context) bool {
+	v := ctx.Value(dbrKey)
+	if v == nil {
+		panic("dbr.RunsOnRuntime called without calling dbr.DetectRuntime first")
+	}
+	return v.(bool)
+}
diff --git a/libs/dbr/context_test.go b/libs/dbr/context_test.go
new file mode 100644
index 00000000..fc53cf13
--- /dev/null
+++ b/libs/dbr/context_test.go
@@ -0,0 +1,59 @@
+package dbr
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestContext_DetectRuntimePanics(t *testing.T) {
+	ctx := context.Background()
+
+	// Run detection.
+	ctx = DetectRuntime(ctx)
+
+	// Expect a panic if the detection is run twice.
+	assert.Panics(t, func() {
+		ctx = DetectRuntime(ctx)
+	})
+}
+
+func TestContext_MockRuntimePanics(t *testing.T) {
+	ctx := context.Background()
+
+	// Mock the detection result.
+	ctx = MockRuntime(ctx, true)
+
+	// Expect a panic if the mock function is run twice.
+	assert.Panics(t, func() {
+		MockRuntime(ctx, true)
+	})
+}
+
+func TestContext_RunsOnRuntimePanics(t *testing.T) {
+	ctx := context.Background()
+
+	// Expect a panic if the detection is not run.
+	assert.Panics(t, func() {
+		RunsOnRuntime(ctx)
+	})
+}
+
+func TestContext_RunsOnRuntime(t *testing.T) {
+	ctx := context.Background()
+
+	// Run detection.
+	ctx = DetectRuntime(ctx)
+
+	// Expect no panic because detection has run.
+	assert.NotPanics(t, func() {
+		RunsOnRuntime(ctx)
+	})
+}
+
+func TestContext_RunsOnRuntimeWithMock(t *testing.T) {
+	ctx := context.Background()
+	assert.True(t, RunsOnRuntime(MockRuntime(ctx, true)))
+	assert.False(t, RunsOnRuntime(MockRuntime(ctx, false)))
+}
diff --git a/libs/dbr/detect.go b/libs/dbr/detect.go
new file mode 100644
index 00000000..d8b4dfe2
--- /dev/null
+++ b/libs/dbr/detect.go
@@ -0,0 +1,35 @@
+package dbr
+
+import (
+	"context"
+	"os"
+	"runtime"
+
+	"github.com/databricks/cli/libs/env"
+)
+
+// Indirect reference to [os.Stat] so that tests can swap in a fake implementation.
+var statFunc = os.Stat
+
+// detect returns true if the current process is running on a Databricks Runtime.
+// Its return value is meant to be cached in the context.
+func detect(ctx context.Context) bool {
+	// Databricks Runtime implies Linux.
+	// Return early on other operating systems.
+	if runtime.GOOS != "linux" {
+		return false
+	}
+
+	// Databricks Runtime always has the DATABRICKS_RUNTIME_VERSION environment variable set.
+	if value, ok := env.Lookup(ctx, "DATABRICKS_RUNTIME_VERSION"); !ok || value == "" {
+		return false
+	}
+
+	// Expect to see a "/databricks" directory.
+	if fi, err := statFunc("/databricks"); err != nil || !fi.IsDir() {
+		return false
+	}
+
+	// All checks passed.
+ return true +} diff --git a/libs/dbr/detect_test.go b/libs/dbr/detect_test.go new file mode 100644 index 00000000..3a4a43a7 --- /dev/null +++ b/libs/dbr/detect_test.go @@ -0,0 +1,83 @@ +package dbr + +import ( + "context" + "io/fs" + "runtime" + "testing" + + "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/libs/fakefs" + "github.com/stretchr/testify/assert" +) + +func requireLinux(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skipf("skipping test on %s", runtime.GOOS) + } +} + +func configureStatFunc(t *testing.T, fi fs.FileInfo, err error) { + originalFunc := statFunc + statFunc = func(name string) (fs.FileInfo, error) { + assert.Equal(t, "/databricks", name) + return fi, err + } + + t.Cleanup(func() { + statFunc = originalFunc + }) +} + +func TestDetect_NotLinux(t *testing.T) { + if runtime.GOOS == "linux" { + t.Skip("skipping test on Linux OS") + } + + ctx := context.Background() + assert.False(t, detect(ctx)) +} + +func TestDetect_Env(t *testing.T) { + requireLinux(t) + + // Configure other checks to pass. + configureStatFunc(t, fakefs.FileInfo{FakeDir: true}, nil) + + t.Run("empty", func(t *testing.T) { + ctx := env.Set(context.Background(), "DATABRICKS_RUNTIME_VERSION", "") + assert.False(t, detect(ctx)) + }) + + t.Run("non-empty cluster", func(t *testing.T) { + ctx := env.Set(context.Background(), "DATABRICKS_RUNTIME_VERSION", "15.4") + assert.True(t, detect(ctx)) + }) + + t.Run("non-empty serverless", func(t *testing.T) { + ctx := env.Set(context.Background(), "DATABRICKS_RUNTIME_VERSION", "client.1.13") + assert.True(t, detect(ctx)) + }) +} + +func TestDetect_Stat(t *testing.T) { + requireLinux(t) + + // Configure other checks to pass. + ctx := env.Set(context.Background(), "DATABRICKS_RUNTIME_VERSION", "non-empty") + + t.Run("error", func(t *testing.T) { + configureStatFunc(t, nil, fs.ErrNotExist) + assert.False(t, detect(ctx)) + }) + + t.Run("not a directory", func(t *testing.T) { + configureStatFunc(t, fakefs.FileInfo{}, nil) + assert.False(t, detect(ctx)) + }) + + t.Run("directory", func(t *testing.T) { + configureStatFunc(t, fakefs.FileInfo{FakeDir: true}, nil) + assert.True(t, detect(ctx)) + }) +} diff --git a/libs/dyn/convert/struct_info.go b/libs/dyn/convert/struct_info.go index 595e52ed..dc3ed4da 100644 --- a/libs/dyn/convert/struct_info.go +++ b/libs/dyn/convert/struct_info.go @@ -6,7 +6,6 @@ import ( "sync" "github.com/databricks/cli/libs/dyn" - "github.com/databricks/cli/libs/textutil" ) // structInfo holds the type information we need to efficiently @@ -85,14 +84,6 @@ func buildStructInfo(typ reflect.Type) structInfo { } name, _, _ := strings.Cut(sf.Tag.Get("json"), ",") - if typ.Name() == "QualityMonitor" && name == "-" { - urlName, _, _ := strings.Cut(sf.Tag.Get("url"), ",") - if urlName == "" || urlName == "-" { - name = textutil.CamelToSnakeCase(sf.Name) - } else { - name = urlName - } - } if name == "" || name == "-" { continue } diff --git a/libs/dyn/dynassert/assert.go b/libs/dyn/dynassert/assert.go index dc6676ca..f667b08c 100644 --- a/libs/dyn/dynassert/assert.go +++ b/libs/dyn/dynassert/assert.go @@ -111,3 +111,7 @@ func PanicsWithError(t assert.TestingT, errString string, f func(), msgAndArgs . func NotPanics(t assert.TestingT, f func(), msgAndArgs ...interface{}) bool { return assert.NotPanics(t, f, msgAndArgs...) } + +func JSONEq(t assert.TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { + return assert.JSONEq(t, expected, actual, msgAndArgs...) 
+} diff --git a/libs/dyn/jsonsaver/encoder.go b/libs/dyn/jsonsaver/encoder.go new file mode 100644 index 00000000..66997e96 --- /dev/null +++ b/libs/dyn/jsonsaver/encoder.go @@ -0,0 +1,39 @@ +package jsonsaver + +import ( + "bytes" + "encoding/json" +) + +// The encoder type encapsulates a [json.Encoder] and its target buffer. +// Escaping of HTML characters in the output is disabled. +type encoder struct { + *json.Encoder + *bytes.Buffer +} + +func newEncoder() encoder { + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + + // By default, json.Encoder escapes HTML characters, converting symbols like '<' to '\u003c'. + // This behavior helps prevent XSS attacks when JSON is embedded within HTML. + // However, we disable this feature since we're not dealing with HTML context. + // Keeping the escapes enabled would result in unnecessary differences when processing JSON payloads + // that already contain escaped characters. + enc.SetEscapeHTML(false) + return encoder{enc, &buf} +} + +func marshalNoEscape(v any) ([]byte, error) { + enc := newEncoder() + err := enc.Encode(v) + return enc.Bytes(), err +} + +func marshalIndentNoEscape(v any, prefix, indent string) ([]byte, error) { + enc := newEncoder() + enc.SetIndent(prefix, indent) + err := enc.Encode(v) + return enc.Bytes(), err +} diff --git a/libs/dyn/jsonsaver/encoder_test.go b/libs/dyn/jsonsaver/encoder_test.go new file mode 100644 index 00000000..d1b7d017 --- /dev/null +++ b/libs/dyn/jsonsaver/encoder_test.go @@ -0,0 +1,41 @@ +package jsonsaver + +import ( + "testing" + + assert "github.com/databricks/cli/libs/dyn/dynassert" +) + +func TestEncoder_MarshalNoEscape(t *testing.T) { + out, err := marshalNoEscape("1 < 2") + if !assert.NoError(t, err) { + return + } + + // Confirm the output. + assert.JSONEq(t, `"1 < 2"`, string(out)) + + // Confirm that HTML escaping is disabled. + assert.NotContains(t, string(out), "\\u003c") + + // Confirm that the encoder writes a trailing newline. + assert.Contains(t, string(out), "\n") +} + +func TestEncoder_MarshalIndentNoEscape(t *testing.T) { + out, err := marshalIndentNoEscape([]string{"1 < 2", "2 < 3"}, "", " ") + if !assert.NoError(t, err) { + return + } + + // Confirm the output. + assert.JSONEq(t, `["1 < 2", "2 < 3"]`, string(out)) + + // Confirm that HTML escaping is disabled. + assert.NotContains(t, string(out), "\\u003c") + + // Confirm that the encoder performs indenting and writes a trailing newline. + assert.Contains(t, string(out), "[\n") + assert.Contains(t, string(out), " \"1 < 2\",\n") + assert.Contains(t, string(out), "]\n") +} diff --git a/libs/dyn/jsonsaver/marshal.go b/libs/dyn/jsonsaver/marshal.go new file mode 100644 index 00000000..a78a68f2 --- /dev/null +++ b/libs/dyn/jsonsaver/marshal.go @@ -0,0 +1,89 @@ +package jsonsaver + +import ( + "bytes" + "fmt" + + "github.com/databricks/cli/libs/dyn" +) + +// Marshal is a version of [json.Marshal] for [dyn.Value]. +// +// Objects in the output retain the order of keys as they appear in the underlying [dyn.Value]. +// The output does not escape HTML characters in strings. +func Marshal(v dyn.Value) ([]byte, error) { + return marshalNoEscape(wrap{v}) +} + +// MarshalIndent is a version of [json.MarshalIndent] for [dyn.Value]. +// +// Objects in the output retain the order of keys as they appear in the underlying [dyn.Value]. +// The output does not escape HTML characters in strings. 
+func MarshalIndent(v dyn.Value, prefix, indent string) ([]byte, error) { + return marshalIndentNoEscape(wrap{v}, prefix, indent) +} + +// Wrapper type for [dyn.Value] to expose the [json.Marshaler] interface. +type wrap struct { + v dyn.Value +} + +// MarshalJSON implements the [json.Marshaler] interface for the [dyn.Value] wrapper type. +func (w wrap) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + if err := marshalValue(&buf, w.v); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// marshalValue recursively writes JSON for a [dyn.Value] to the buffer. +func marshalValue(buf *bytes.Buffer, v dyn.Value) error { + switch v.Kind() { + case dyn.KindString, dyn.KindBool, dyn.KindInt, dyn.KindFloat, dyn.KindTime, dyn.KindNil: + out, err := marshalNoEscape(v.AsAny()) + if err != nil { + return err + } + + // The encoder writes a trailing newline, so we need to remove it + // to avoid adding extra newlines when embedding this JSON. + out = out[:len(out)-1] + buf.Write(out) + case dyn.KindMap: + buf.WriteByte('{') + for i, pair := range v.MustMap().Pairs() { + if i > 0 { + buf.WriteByte(',') + } + // Require keys to be strings. + if pair.Key.Kind() != dyn.KindString { + return fmt.Errorf("map key must be a string, got %s", pair.Key.Kind()) + } + // Marshal the key + if err := marshalValue(buf, pair.Key); err != nil { + return err + } + buf.WriteByte(':') + // Marshal the value + if err := marshalValue(buf, pair.Value); err != nil { + return err + } + } + buf.WriteByte('}') + case dyn.KindSequence: + buf.WriteByte('[') + for i, item := range v.MustSequence() { + if i > 0 { + buf.WriteByte(',') + } + if err := marshalValue(buf, item); err != nil { + return err + } + } + buf.WriteByte(']') + default: + return fmt.Errorf("unsupported kind: %d", v.Kind()) + } + return nil +} diff --git a/libs/dyn/jsonsaver/marshal_test.go b/libs/dyn/jsonsaver/marshal_test.go new file mode 100644 index 00000000..0b6a3428 --- /dev/null +++ b/libs/dyn/jsonsaver/marshal_test.go @@ -0,0 +1,100 @@ +package jsonsaver + +import ( + "testing" + + "github.com/databricks/cli/libs/dyn" + assert "github.com/databricks/cli/libs/dyn/dynassert" +) + +func TestMarshal_String(t *testing.T) { + b, err := Marshal(dyn.V("string")) + if assert.NoError(t, err) { + assert.JSONEq(t, `"string"`, string(b)) + } +} + +func TestMarshal_Bool(t *testing.T) { + b, err := Marshal(dyn.V(true)) + if assert.NoError(t, err) { + assert.JSONEq(t, `true`, string(b)) + } +} + +func TestMarshal_Int(t *testing.T) { + b, err := Marshal(dyn.V(42)) + if assert.NoError(t, err) { + assert.JSONEq(t, `42`, string(b)) + } +} + +func TestMarshal_Float(t *testing.T) { + b, err := Marshal(dyn.V(42.1)) + if assert.NoError(t, err) { + assert.JSONEq(t, `42.1`, string(b)) + } +} + +func TestMarshal_Time(t *testing.T) { + b, err := Marshal(dyn.V(dyn.MustTime("2021-01-01T00:00:00Z"))) + if assert.NoError(t, err) { + assert.JSONEq(t, `"2021-01-01T00:00:00Z"`, string(b)) + } +} + +func TestMarshal_Map(t *testing.T) { + m := dyn.NewMapping() + m.Set(dyn.V("key1"), dyn.V("value1")) + m.Set(dyn.V("key2"), dyn.V("value2")) + + b, err := Marshal(dyn.V(m)) + if assert.NoError(t, err) { + assert.JSONEq(t, `{"key1":"value1","key2":"value2"}`, string(b)) + } +} + +func TestMarshal_Sequence(t *testing.T) { + var s []dyn.Value + s = append(s, dyn.V("value1")) + s = append(s, dyn.V("value2")) + + b, err := Marshal(dyn.V(s)) + if assert.NoError(t, err) { + assert.JSONEq(t, `["value1","value2"]`, string(b)) + } +} + +func TestMarshal_Complex(t *testing.T) { + map1 
:= dyn.NewMapping() + map1.Set(dyn.V("str1"), dyn.V("value1")) + map1.Set(dyn.V("str2"), dyn.V("value2")) + + seq1 := []dyn.Value{} + seq1 = append(seq1, dyn.V("value1")) + seq1 = append(seq1, dyn.V("value2")) + + root := dyn.NewMapping() + root.Set(dyn.V("map1"), dyn.V(map1)) + root.Set(dyn.V("seq1"), dyn.V(seq1)) + + // Marshal without indent. + b, err := Marshal(dyn.V(root)) + if assert.NoError(t, err) { + assert.Equal(t, `{"map1":{"str1":"value1","str2":"value2"},"seq1":["value1","value2"]}`+"\n", string(b)) + } + + // Marshal with indent. + b, err = MarshalIndent(dyn.V(root), "", " ") + if assert.NoError(t, err) { + assert.Equal(t, `{ + "map1": { + "str1": "value1", + "str2": "value2" + }, + "seq1": [ + "value1", + "value2" + ] +}`+"\n", string(b)) + } +} diff --git a/libs/fakefs/fakefs.go b/libs/fakefs/fakefs.go new file mode 100644 index 00000000..a8d5eb87 --- /dev/null +++ b/libs/fakefs/fakefs.go @@ -0,0 +1,87 @@ +package fakefs + +import ( + "fmt" + "io/fs" + "time" +) + +var ErrNotImplemented = fmt.Errorf("not implemented") + +// DirEntry is a fake implementation of [fs.DirEntry]. +type DirEntry struct { + fs.FileInfo +} + +func (entry DirEntry) Type() fs.FileMode { + typ := fs.ModePerm + if entry.IsDir() { + typ |= fs.ModeDir + } + return typ +} + +func (entry DirEntry) Info() (fs.FileInfo, error) { + return entry.FileInfo, nil +} + +// FileInfo is a fake implementation of [fs.FileInfo]. +type FileInfo struct { + FakeName string + FakeSize int64 + FakeDir bool + FakeMode fs.FileMode +} + +func (info FileInfo) Name() string { + return info.FakeName +} + +func (info FileInfo) Size() int64 { + return info.FakeSize +} + +func (info FileInfo) Mode() fs.FileMode { + return info.FakeMode +} + +func (info FileInfo) ModTime() time.Time { + return time.Now() +} + +func (info FileInfo) IsDir() bool { + return info.FakeDir +} + +func (info FileInfo) Sys() any { + return nil +} + +// File is a fake implementation of [fs.File]. +type File struct { + FileInfo fs.FileInfo +} + +func (f File) Close() error { + return nil +} + +func (f File) Read(p []byte) (n int, err error) { + return 0, ErrNotImplemented +} + +func (f File) Stat() (fs.FileInfo, error) { + return f.FileInfo, nil +} + +// FS is a fake implementation of [fs.FS]. 
+type FS map[string]fs.File + +func (f FS) Open(name string) (fs.File, error) { + e, ok := f[name] + if !ok { + return nil, fs.ErrNotExist + } + + return e, nil +} diff --git a/libs/fakefs/fakefs_test.go b/libs/fakefs/fakefs_test.go new file mode 100644 index 00000000..b8919020 --- /dev/null +++ b/libs/fakefs/fakefs_test.go @@ -0,0 +1,38 @@ +package fakefs + +import ( + "io/fs" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestFile(t *testing.T) { + var fakefile fs.File = File{ + FileInfo: FileInfo{ + FakeName: "file", + }, + } + + _, err := fakefile.Read([]byte{}) + assert.ErrorIs(t, err, ErrNotImplemented) + + fi, err := fakefile.Stat() + assert.NoError(t, err) + assert.Equal(t, "file", fi.Name()) + + err = fakefile.Close() + assert.NoError(t, err) +} + +func TestFS(t *testing.T) { + var fakefs fs.FS = FS{ + "file": File{}, + } + + _, err := fakefs.Open("doesntexist") + assert.ErrorIs(t, err, fs.ErrNotExist) + + _, err = fakefs.Open("file") + assert.NoError(t, err) +} diff --git a/libs/filer/completer/completer_test.go b/libs/filer/completer/completer_test.go index c533f0b6..d284447b 100644 --- a/libs/filer/completer/completer_test.go +++ b/libs/filer/completer/completer_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/fakefs" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/spf13/cobra" @@ -17,7 +18,7 @@ func setupCompleter(t *testing.T, onlyDirs bool) *completer { // Needed to make type context.valueCtx for mockFilerForPath ctx = root.SetWorkspaceClient(ctx, mocks.NewMockWorkspaceClient(t).WorkspaceClient) - fakeFiler := filer.NewFakeFiler(map[string]filer.FakeFileInfo{ + fakeFiler := filer.NewFakeFiler(map[string]fakefs.FileInfo{ "dir": {FakeName: "root", FakeDir: true}, "dir/dirA": {FakeDir: true}, "dir/dirB": {FakeDir: true}, diff --git a/libs/filer/fake_filer.go b/libs/filer/fake_filer.go index 0e650ff6..76b8bcd9 100644 --- a/libs/filer/fake_filer.go +++ b/libs/filer/fake_filer.go @@ -8,58 +8,12 @@ import ( "path" "sort" "strings" - "time" + + "github.com/databricks/cli/libs/fakefs" ) -type FakeDirEntry struct { - FakeFileInfo -} - -func (entry FakeDirEntry) Type() fs.FileMode { - typ := fs.ModePerm - if entry.FakeDir { - typ |= fs.ModeDir - } - return typ -} - -func (entry FakeDirEntry) Info() (fs.FileInfo, error) { - return entry.FakeFileInfo, nil -} - -type FakeFileInfo struct { - FakeName string - FakeSize int64 - FakeDir bool - FakeMode fs.FileMode -} - -func (info FakeFileInfo) Name() string { - return info.FakeName -} - -func (info FakeFileInfo) Size() int64 { - return info.FakeSize -} - -func (info FakeFileInfo) Mode() fs.FileMode { - return info.FakeMode -} - -func (info FakeFileInfo) ModTime() time.Time { - return time.Now() -} - -func (info FakeFileInfo) IsDir() bool { - return info.FakeDir -} - -func (info FakeFileInfo) Sys() any { - return nil -} - type FakeFiler struct { - entries map[string]FakeFileInfo + entries map[string]fakefs.FileInfo } func (f *FakeFiler) Write(ctx context.Context, p string, reader io.Reader, mode ...WriteMode) error { @@ -97,7 +51,7 @@ func (f *FakeFiler) ReadDir(ctx context.Context, p string) ([]fs.DirEntry, error continue } - out = append(out, FakeDirEntry{v}) + out = append(out, fakefs.DirEntry{FileInfo: v}) } sort.Slice(out, func(i, j int) bool { return out[i].Name() < out[j].Name() }) @@ -117,7 +71,11 @@ func (f *FakeFiler) Stat(ctx context.Context, path string) (fs.FileInfo, error) return 
entry, nil
 }
 
-func NewFakeFiler(entries map[string]FakeFileInfo) *FakeFiler {
+// NewFakeFiler creates a new fake [Filer] instance with the given entries.
+// It sets the [fakefs.FileInfo.FakeName] field of each entry to the base name of its path.
+//
+// This is meant to be used in tests.
+func NewFakeFiler(entries map[string]fakefs.FileInfo) *FakeFiler {
 	fakeFiler := &FakeFiler{
 		entries: entries,
 	}
diff --git a/libs/filer/fake_filer_test.go b/libs/filer/fake_filer_test.go
new file mode 100644
index 00000000..fb536488
--- /dev/null
+++ b/libs/filer/fake_filer_test.go
@@ -0,0 +1,98 @@
+package filer
+
+import (
+	"context"
+	"io"
+	"io/fs"
+	"testing"
+
+	"github.com/databricks/cli/libs/fakefs"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestFakeFiler_Read(t *testing.T) {
+	f := NewFakeFiler(map[string]fakefs.FileInfo{
+		"file": {},
+	})
+
+	ctx := context.Background()
+	r, err := f.Read(ctx, "file")
+	require.NoError(t, err)
+	contents, err := io.ReadAll(r)
+	require.NoError(t, err)
+
+	// The contents of every file are "foo".
+	assert.Equal(t, "foo", string(contents))
+}
+
+func TestFakeFiler_Read_NotFound(t *testing.T) {
+	f := NewFakeFiler(map[string]fakefs.FileInfo{
+		"foo": {},
+	})
+
+	ctx := context.Background()
+	_, err := f.Read(ctx, "bar")
+	assert.ErrorIs(t, err, fs.ErrNotExist)
+}
+
+func TestFakeFiler_ReadDir_NotFound(t *testing.T) {
+	f := NewFakeFiler(map[string]fakefs.FileInfo{
+		"dir1": {FakeDir: true},
+	})
+
+	ctx := context.Background()
+	_, err := f.ReadDir(ctx, "dir2")
+	assert.ErrorIs(t, err, fs.ErrNotExist)
+}
+
+func TestFakeFiler_ReadDir_NotADirectory(t *testing.T) {
+	f := NewFakeFiler(map[string]fakefs.FileInfo{
+		"file": {},
+	})
+
+	ctx := context.Background()
+	_, err := f.ReadDir(ctx, "file")
+	assert.ErrorIs(t, err, fs.ErrInvalid)
+}
+
+func TestFakeFiler_ReadDir(t *testing.T) {
+	f := NewFakeFiler(map[string]fakefs.FileInfo{
+		"dir1":       {FakeDir: true},
+		"dir1/file2": {},
+		"dir1/dir2":  {FakeDir: true},
+	})
+
+	ctx := context.Background()
+	entries, err := f.ReadDir(ctx, "dir1/")
+	require.NoError(t, err)
+	require.Len(t, entries, 2)
+
+	// The entries are sorted by name.
+ assert.Equal(t, "dir2", entries[0].Name()) + assert.True(t, entries[0].IsDir()) + assert.Equal(t, "file2", entries[1].Name()) + assert.False(t, entries[1].IsDir()) +} + +func TestFakeFiler_Stat(t *testing.T) { + f := NewFakeFiler(map[string]fakefs.FileInfo{ + "file": {}, + }) + + ctx := context.Background() + info, err := f.Stat(ctx, "file") + require.NoError(t, err) + + assert.Equal(t, "file", info.Name()) +} + +func TestFakeFiler_Stat_NotFound(t *testing.T) { + f := NewFakeFiler(map[string]fakefs.FileInfo{ + "foo": {}, + }) + + ctx := context.Background() + _, err := f.Stat(ctx, "bar") + assert.ErrorIs(t, err, fs.ErrNotExist) +} diff --git a/libs/filer/fs_test.go b/libs/filer/fs_test.go index a74c10f0..849cf6f7 100644 --- a/libs/filer/fs_test.go +++ b/libs/filer/fs_test.go @@ -6,6 +6,7 @@ import ( "io/fs" "testing" + "github.com/databricks/cli/libs/fakefs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -35,7 +36,7 @@ func TestFsDirImplementsFsReadDirFile(t *testing.T) { } func fakeFS() fs.FS { - fakeFiler := NewFakeFiler(map[string]FakeFileInfo{ + fakeFiler := NewFakeFiler(map[string]fakefs.FileInfo{ ".": {FakeName: "root", FakeDir: true}, "dirA": {FakeDir: true}, "dirB": {FakeDir: true}, diff --git a/libs/filer/workspace_files_extensions_client.go b/libs/filer/workspace_files_extensions_client.go index b24ecf7e..53b77dd5 100644 --- a/libs/filer/workspace_files_extensions_client.go +++ b/libs/filer/workspace_files_extensions_client.go @@ -7,6 +7,7 @@ import ( "io" "io/fs" "path" + "slices" "strings" "github.com/databricks/cli/libs/log" @@ -23,14 +24,6 @@ type workspaceFilesExtensionsClient struct { readonly bool } -var extensionsToLanguages = map[string]workspace.Language{ - ".py": workspace.LanguagePython, - ".r": workspace.LanguageR, - ".scala": workspace.LanguageScala, - ".sql": workspace.LanguageSql, - ".ipynb": workspace.LanguagePython, -} - type workspaceFileStatus struct { wsfsFileInfo @@ -54,7 +47,12 @@ func (w *workspaceFilesExtensionsClient) getNotebookStatByNameWithExt(ctx contex nameWithoutExt := strings.TrimSuffix(name, ext) // File name does not have an extension associated with Databricks notebooks, return early. - if _, ok := extensionsToLanguages[ext]; !ok { + if !slices.Contains([]string{ + notebook.ExtensionPython, + notebook.ExtensionR, + notebook.ExtensionScala, + notebook.ExtensionSql, + notebook.ExtensionJupyter}, ext) { return nil, nil } @@ -75,22 +73,23 @@ func (w *workspaceFilesExtensionsClient) getNotebookStatByNameWithExt(ctx contex return nil, nil } - // Not the correct language. Return early. - if stat.Language != extensionsToLanguages[ext] { - log.Debugf(ctx, "attempting to determine if %s could be a notebook. Found a notebook at %s but it is not of the correct language. Expected %s but found %s.", name, path.Join(w.root, nameWithoutExt), extensionsToLanguages[ext], stat.Language) + // Not the correct language. Return early. Note: All languages are supported + // for Jupyter notebooks. + if ext != notebook.ExtensionJupyter && stat.Language != notebook.ExtensionToLanguage[ext] { + log.Debugf(ctx, "attempting to determine if %s could be a notebook. Found a notebook at %s but it is not of the correct language. Expected %s but found %s.", name, path.Join(w.root, nameWithoutExt), notebook.ExtensionToLanguage[ext], stat.Language) return nil, nil } - // When the extension is .py we expect the export format to be source. + // For non-jupyter notebooks the export format should be source. // If it's not, return early. 
- if ext == ".py" && stat.ReposExportFormat != workspace.ExportFormatSource { + if ext != notebook.ExtensionJupyter && stat.ReposExportFormat != workspace.ExportFormatSource { log.Debugf(ctx, "attempting to determine if %s could be a notebook. Found a notebook at %s but it is not exported as a source notebook. Its export format is %s.", name, path.Join(w.root, nameWithoutExt), stat.ReposExportFormat) return nil, nil } // When the extension is .ipynb we expect the export format to be Jupyter. // If it's not, return early. - if ext == ".ipynb" && stat.ReposExportFormat != workspace.ExportFormatJupyter { + if ext == notebook.ExtensionJupyter && stat.ReposExportFormat != workspace.ExportFormatJupyter { log.Debugf(ctx, "attempting to determine if %s could be a notebook. Found a notebook at %s but it is not exported as a Jupyter notebook. Its export format is %s.", name, path.Join(w.root, nameWithoutExt), stat.ReposExportFormat) return nil, nil } @@ -120,8 +119,8 @@ func (w *workspaceFilesExtensionsClient) getNotebookStatByNameWithoutExt(ctx con ext := notebook.GetExtensionByLanguage(&stat.ObjectInfo) // If the notebook was exported as a Jupyter notebook, the extension should be .ipynb. - if stat.Language == workspace.LanguagePython && stat.ReposExportFormat == workspace.ExportFormatJupyter { - ext = ".ipynb" + if stat.ReposExportFormat == workspace.ExportFormatJupyter { + ext = notebook.ExtensionJupyter } // Modify the stat object path to include the extension. This stat object will be used diff --git a/libs/filer/workspace_files_extensions_client_test.go b/libs/filer/workspace_files_extensions_client_test.go index 321c4371..974a6a37 100644 --- a/libs/filer/workspace_files_extensions_client_test.go +++ b/libs/filer/workspace_files_extensions_client_test.go @@ -37,7 +37,7 @@ func TestFilerWorkspaceFilesExtensionsErrorsOnDupName(t *testing.T) { expectedError string }{ { - name: "python source notebook and file", + name: "python source notebook and file with source extension", language: workspace.LanguagePython, notebookExportFormat: workspace.ExportFormatSource, notebookPath: "/dir/foo", @@ -45,7 +45,31 @@ func TestFilerWorkspaceFilesExtensionsErrorsOnDupName(t *testing.T) { expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.py resolve to the same name /foo.py. Changing the name of one of these objects will resolve this issue", }, { - name: "python jupyter notebook and file", + name: "scala source notebook and file with source extension", + language: workspace.LanguageScala, + notebookExportFormat: workspace.ExportFormatSource, + notebookPath: "/dir/foo", + filePath: "/dir/foo.scala", + expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.scala resolve to the same name /foo.scala. Changing the name of one of these objects will resolve this issue", + }, + { + name: "r source notebook and file with source extension", + language: workspace.LanguageR, + notebookExportFormat: workspace.ExportFormatSource, + notebookPath: "/dir/foo", + filePath: "/dir/foo.r", + expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.r resolve to the same name /foo.r. 
Changing the name of one of these objects will resolve this issue",
+ },
+ {
+ name: "sql source notebook and file with source extension",
+ language: workspace.LanguageSql,
+ notebookExportFormat: workspace.ExportFormatSource,
+ notebookPath: "/dir/foo",
+ filePath: "/dir/foo.sql",
+ expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.sql resolve to the same name /foo.sql. Changing the name of one of these objects will resolve this issue",
+ },
+ {
+ name: "python jupyter notebook and file with source extension",
 language: workspace.LanguagePython,
 notebookExportFormat: workspace.ExportFormatJupyter,
 notebookPath: "/dir/foo",
@@ -54,37 +78,64 @@ func TestFilerWorkspaceFilesExtensionsErrorsOnDupName(t *testing.T) {
 expectedError: "",
 },
 {
- name: "scala source notebook and file",
+ name: "scala jupyter notebook and file with source extension",
 language: workspace.LanguageScala,
- notebookExportFormat: workspace.ExportFormatSource,
+ notebookExportFormat: workspace.ExportFormatJupyter,
 notebookPath: "/dir/foo",
 filePath: "/dir/foo.scala",
- expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.scala resolve to the same name /foo.scala. Changing the name of one of these objects will resolve this issue",
+ // Jupyter notebooks would correspond to foo.ipynb so an error is not expected.
+ expectedError: "",
 },
 {
- name: "r source notebook and file",
- language: workspace.LanguageR,
- notebookExportFormat: workspace.ExportFormatSource,
- notebookPath: "/dir/foo",
- filePath: "/dir/foo.r",
- expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.r resolve to the same name /foo.r. Changing the name of one of these objects will resolve this issue",
- },
- {
- name: "sql source notebook and file",
+ name: "sql jupyter notebook and file with source extension",
 language: workspace.LanguageSql,
- notebookExportFormat: workspace.ExportFormatSource,
+ notebookExportFormat: workspace.ExportFormatJupyter,
 notebookPath: "/dir/foo",
 filePath: "/dir/foo.sql",
- expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.sql resolve to the same name /foo.sql. Changing the name of one of these objects will resolve this issue",
+ // Jupyter notebooks would correspond to foo.ipynb so an error is not expected.
+ expectedError: "",
 },
 {
- name: "python jupyter notebook and file",
+ name: "r jupyter notebook and file with source extension",
+ language: workspace.LanguageR,
+ notebookExportFormat: workspace.ExportFormatJupyter,
+ notebookPath: "/dir/foo",
+ filePath: "/dir/foo.r",
+ // Jupyter notebooks would correspond to foo.ipynb so an error is not expected.
+ expectedError: "",
+ },
+ {
+ name: "python jupyter notebook and file with .ipynb extension",
 language: workspace.LanguagePython,
 notebookExportFormat: workspace.ExportFormatJupyter,
 notebookPath: "/dir/foo",
 filePath: "/dir/foo.ipynb",
 expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.ipynb resolve to the same name /foo.ipynb. 
Changing the name of one of these objects will resolve this issue", }, + { + name: "scala jupyter notebook and file with .ipynb extension", + language: workspace.LanguageScala, + notebookExportFormat: workspace.ExportFormatJupyter, + notebookPath: "/dir/foo", + filePath: "/dir/foo.ipynb", + expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.ipynb resolve to the same name /foo.ipynb. Changing the name of one of these objects will resolve this issue", + }, + { + name: "r jupyter notebook and file with .ipynb extension", + language: workspace.LanguageR, + notebookExportFormat: workspace.ExportFormatJupyter, + notebookPath: "/dir/foo", + filePath: "/dir/foo.ipynb", + expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.ipynb resolve to the same name /foo.ipynb. Changing the name of one of these objects will resolve this issue", + }, + { + name: "sql jupyter notebook and file with .ipynb extension", + language: workspace.LanguageSql, + notebookExportFormat: workspace.ExportFormatJupyter, + notebookPath: "/dir/foo", + filePath: "/dir/foo.ipynb", + expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.ipynb resolve to the same name /foo.ipynb. Changing the name of one of these objects will resolve this issue", + }, } { t.Run(tc.name, func(t *testing.T) { mockedWorkspaceClient := mocks.NewMockWorkspaceClient(t) diff --git a/libs/notebook/detect.go b/libs/notebook/detect.go index 582a8847..cd8680bf 100644 --- a/libs/notebook/detect.go +++ b/libs/notebook/detect.go @@ -107,19 +107,19 @@ func DetectWithFS(fsys fs.FS, name string) (notebook bool, language workspace.La // Determine which header to expect based on filename extension. 
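+ // The extension constants referenced below are defined in libs/notebook/ext.go.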
 ext := strings.ToLower(filepath.Ext(name))
 switch ext {
- case ".py":
+ case ExtensionPython:
 header = `# Databricks notebook source`
 language = workspace.LanguagePython
- case ".r":
+ case ExtensionR:
 header = `# Databricks notebook source`
 language = workspace.LanguageR
- case ".scala":
+ case ExtensionScala:
 header = "// Databricks notebook source"
 language = workspace.LanguageScala
- case ".sql":
+ case ExtensionSql:
 header = "-- Databricks notebook source"
 language = workspace.LanguageSql
- case ".ipynb":
+ case ExtensionJupyter:
 return DetectJupyterWithFS(fsys, name)
 default:
 return false, "", nil
diff --git a/libs/notebook/detect_test.go b/libs/notebook/detect_test.go
index ad89d6dd..786c7e39 100644
--- a/libs/notebook/detect_test.go
+++ b/libs/notebook/detect_test.go
@@ -7,6 +7,7 @@ import (
 "path/filepath"
 "testing"
 
+ "github.com/databricks/cli/libs/fakefs"
 "github.com/databricks/databricks-sdk-go/service/workspace"
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
 )
@@ -100,11 +101,21 @@ func TestDetectFileWithLongHeader(t *testing.T) {
 assert.False(t, nb)
 }
 
+type fileInfoWithWorkspaceInfo struct {
+ fakefs.FileInfo
+
+ oi workspace.ObjectInfo
+}
+
+func (f fileInfoWithWorkspaceInfo) WorkspaceObjectInfo() workspace.ObjectInfo {
+ return f.oi
+}
+
 func TestDetectWithObjectInfo(t *testing.T) {
- fakeFS := &fakeFS{
- fakeFile{
- fakeFileInfo{
- workspace.ObjectInfo{
+ fsys := fakefs.FS{
+ "file.py": fakefs.File{
+ FileInfo: fileInfoWithWorkspaceInfo{
+ oi: workspace.ObjectInfo{
 ObjectType: workspace.ObjectTypeNotebook,
 Language: workspace.LanguagePython,
 },
@@ -112,7 +123,7 @@ func TestDetectWithObjectInfo(t *testing.T) {
 },
 }
 
- nb, lang, err := DetectWithFS(fakeFS, "doesntmatter")
+ nb, lang, err := DetectWithFS(fsys, "file.py")
 require.NoError(t, err)
 assert.True(t, nb)
 assert.Equal(t, workspace.LanguagePython, lang)
diff --git a/libs/notebook/ext.go b/libs/notebook/ext.go
index 28d08c11..c34ad2cc 100644
--- a/libs/notebook/ext.go
+++ b/libs/notebook/ext.go
@@ -2,22 +2,40 @@ package notebook
 
 import "github.com/databricks/databricks-sdk-go/service/workspace"
 
+const (
+ ExtensionNone string = ""
+ ExtensionPython string = ".py"
+ ExtensionR string = ".r"
+ ExtensionScala string = ".scala"
+ ExtensionSql string = ".sql"
+ ExtensionJupyter string = ".ipynb"
+)
+
+var ExtensionToLanguage = map[string]workspace.Language{
+ ExtensionPython: workspace.LanguagePython,
+ ExtensionR: workspace.LanguageR,
+ ExtensionScala: workspace.LanguageScala,
+ ExtensionSql: workspace.LanguageSql,
+
+ // The platform supports all languages (Python, R, Scala, and SQL) for Jupyter notebooks.
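+ // ExtensionJupyter (".ipynb") is therefore intentionally absent from this map.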
+} + func GetExtensionByLanguage(objectInfo *workspace.ObjectInfo) string { if objectInfo.ObjectType != workspace.ObjectTypeNotebook { - return "" + return ExtensionNone } switch objectInfo.Language { case workspace.LanguagePython: - return ".py" + return ExtensionPython case workspace.LanguageR: - return ".r" + return ExtensionR case workspace.LanguageScala: - return ".scala" + return ExtensionScala case workspace.LanguageSql: - return ".sql" + return ExtensionSql default: // Do not add any extension to the file name - return "" + return ExtensionNone } } diff --git a/libs/notebook/fakefs_test.go b/libs/notebook/fakefs_test.go deleted file mode 100644 index 4ac135dd..00000000 --- a/libs/notebook/fakefs_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package notebook - -import ( - "fmt" - "io/fs" - "time" - - "github.com/databricks/databricks-sdk-go/service/workspace" -) - -type fakeFS struct { - fakeFile -} - -type fakeFile struct { - fakeFileInfo -} - -func (f fakeFile) Close() error { - return nil -} - -func (f fakeFile) Read(p []byte) (n int, err error) { - return 0, fmt.Errorf("not implemented") -} - -func (f fakeFile) Stat() (fs.FileInfo, error) { - return f.fakeFileInfo, nil -} - -type fakeFileInfo struct { - oi workspace.ObjectInfo -} - -func (f fakeFileInfo) WorkspaceObjectInfo() workspace.ObjectInfo { - return f.oi -} - -func (f fakeFileInfo) Name() string { - return "" -} - -func (f fakeFileInfo) Size() int64 { - return 0 -} - -func (f fakeFileInfo) Mode() fs.FileMode { - return 0 -} - -func (f fakeFileInfo) ModTime() time.Time { - return time.Time{} -} - -func (f fakeFileInfo) IsDir() bool { - return false -} - -func (f fakeFileInfo) Sys() any { - return nil -} - -func (f fakeFS) Open(name string) (fs.File, error) { - return f.fakeFile, nil -} - -func (f fakeFS) Stat(name string) (fs.FileInfo, error) { - panic("not implemented") -} - -func (f fakeFS) ReadDir(name string) ([]fs.DirEntry, error) { - panic("not implemented") -} - -func (f fakeFS) ReadFile(name string) ([]byte, error) { - panic("not implemented") -} diff --git a/libs/process/opts.go b/libs/process/opts.go index 9516e49b..dd066751 100644 --- a/libs/process/opts.go +++ b/libs/process/opts.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "os/exec" + "sync" ) type execOption func(context.Context, *exec.Cmd) error @@ -69,10 +70,24 @@ func WithStdoutWriter(dst io.Writer) execOption { } } +// safeWriter is a writer that is safe to use concurrently. +// It serializes writes to the underlying writer. 
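+//
+// os/exec writes to Stdout and Stderr from separate goroutines when they are
+// not *os.File values, so writes to a shared buffer must be synchronized.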
+type safeWriter struct { + w io.Writer + m sync.Mutex +} + +func (s *safeWriter) Write(p []byte) (n int, err error) { + s.m.Lock() + defer s.m.Unlock() + return s.w.Write(p) +} + func WithCombinedOutput(buf *bytes.Buffer) execOption { + sw := &safeWriter{w: buf} return func(_ context.Context, c *exec.Cmd) error { - c.Stdout = io.MultiWriter(buf, c.Stdout) - c.Stderr = io.MultiWriter(buf, c.Stderr) + c.Stdout = io.MultiWriter(sw, c.Stdout) + c.Stderr = io.MultiWriter(sw, c.Stderr) return nil } } diff --git a/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}.pipeline.yml.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}.pipeline.yml.tmpl index 50e5ad97..1c6b8607 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}.pipeline.yml.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}.pipeline.yml.tmpl @@ -3,7 +3,7 @@ resources: pipelines: {{.project_name}}_pipeline: name: {{.project_name}}_pipeline - {{- if eq default_catalog ""}} + {{- if or (eq default_catalog "") (eq default_catalog "hive_metastore")}} ## Specify the 'catalog' field to configure this pipeline to make use of Unity Catalog: # catalog: catalog_name {{- else}}