mirror of https://github.com/databricks/cli.git

commit 9284fa1e1e
Merge remote-tracking branch 'origin' into remove-uncessary-get

@@ -1 +1 @@
0c86ea6dbd9a730c24ff0d4e509603e476955ac5
d25296d2f4aa7bd6195c816fdf82e0f960f775da
@@ -115,6 +115,9 @@ func new{{.PascalName}}() *cobra.Command {
{{- if .Request}}

var {{.CamelName}}Req {{.Service.Package.Name}}.{{.Request.PascalName}}
{{- if .RequestBodyField }}
{{.CamelName}}Req.{{.RequestBodyField.PascalName}} = &{{.Service.Package.Name}}.{{.RequestBodyField.Entity.PascalName}}{}
{{- end }}
{{- if .CanUseJson}}
var {{.CamelName}}Json flags.JsonFlag
{{- end}}
@@ -127,21 +130,27 @@ func new{{.PascalName}}() *cobra.Command {
cmd.Flags().BoolVar(&{{.CamelName}}SkipWait, "no-wait", {{.CamelName}}SkipWait, `do not wait to reach {{range $i, $e := .Wait.Success}}{{if $i}} or {{end}}{{.Content}}{{end}} state`)
cmd.Flags().DurationVar(&{{.CamelName}}Timeout, "timeout", {{.Wait.Timeout}}*time.Minute, `maximum amount of time to reach {{range $i, $e := .Wait.Success}}{{if $i}} or {{end}}{{.Content}}{{end}} state`)
{{end -}}
{{if .Request}}// TODO: short flags
{{- $request := .Request -}}
{{- if .RequestBodyField -}}
{{- $request = .RequestBodyField.Entity -}}
{{- end -}}
{{if $request }}// TODO: short flags
{{- if .CanUseJson}}
cmd.Flags().Var(&{{.CamelName}}Json, "json", `either inline JSON string or @path/to/file.json with request body`)
{{- end}}
{{$method := .}}
{{ if not .IsJsonOnly }}
{{range .Request.Fields -}}
{{range $request.Fields -}}
{{- if not .Required -}}
{{if .Entity.IsObject }}// TODO: complex arg: {{.Name}}
{{else if .Entity.IsAny }}// TODO: any: {{.Name}}
{{else if .Entity.ArrayValue }}// TODO: array: {{.Name}}
{{else if .Entity.MapValue }}// TODO: map via StringToStringVar: {{.Name}}
{{else if .Entity.IsEmpty }}// TODO: output-only field
{{else if .Entity.Enum }}cmd.Flags().Var(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", `{{.Summary | without "`" | trimSuffix "."}}. Supported values: {{template "printArray" .Entity.Enum}}`)
{{else}}cmd.Flags().{{template "arg-type" .Entity}}(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", {{$method.CamelName}}Req.{{.PascalName}}, `{{.Summary | without "`"}}`)
{{else if .Entity.IsComputed -}}
{{else if .IsOutputOnly -}}
{{else if .Entity.Enum }}cmd.Flags().Var(&{{- template "request-body-obj" (dict "Method" $method "Field" .)}}, "{{.KebabName}}", `{{.Summary | without "`" | trimSuffix "."}}. Supported values: {{template "printArray" .Entity.Enum}}`)
{{else}}cmd.Flags().{{template "arg-type" .Entity}}(&{{- template "request-body-obj" (dict "Method" $method "Field" .)}}, "{{.KebabName}}", {{- template "request-body-obj" (dict "Method" $method "Field" .)}}, `{{.Summary | without "`"}}`)
{{end}}
{{- end -}}
{{- end}}
@@ -161,14 +170,14 @@ func new{{.PascalName}}() *cobra.Command {
{{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }}

{{- $hasPosArgs := .HasRequiredPositionalArguments -}}
{{- $hasSinglePosArg := and $hasPosArgs (eq 1 (len .Request.RequiredFields)) -}}
{{- $hasSinglePosArg := and $hasPosArgs (eq 1 (len $request.RequiredFields)) -}}
{{- $serviceHasNamedIdMap := and (and .Service.List .Service.List.NamedIdMap) (not (eq .PascalName "List")) -}}
{{- $hasIdPrompt := and (not $noPrompt) (and $hasSinglePosArg $serviceHasNamedIdMap) -}}
{{- $wait := and .Wait (and (not .IsCrudRead) (not (eq .SnakeName "get_run"))) -}}
{{- $hasRequiredArgs := and (not $hasIdPrompt) $hasPosArgs -}}
{{- $hasSingleRequiredRequestBodyFieldWithPrompt := and (and $hasIdPrompt .Request) (eq 1 (len .Request.RequiredRequestBodyFields)) -}}
{{- $onlyPathArgsRequiredAsPositionalArguments := and .Request (eq (len .RequiredPositionalArguments) (len .Request.RequiredPathFields)) -}}
{{- $hasDifferentArgsWithJsonFlag := and (not $onlyPathArgsRequiredAsPositionalArguments) (and .CanUseJson .Request.HasRequiredRequestBodyFields) -}}
{{- $hasSingleRequiredRequestBodyFieldWithPrompt := and (and $hasIdPrompt $request) (eq 1 (len $request.RequiredRequestBodyFields)) -}}
{{- $onlyPathArgsRequiredAsPositionalArguments := and $request (eq (len .RequiredPositionalArguments) (len $request.RequiredPathFields)) -}}
{{- $hasDifferentArgsWithJsonFlag := and (not $onlyPathArgsRequiredAsPositionalArguments) (and .CanUseJson (or $request.HasRequiredRequestBodyFields )) -}}
{{- $hasCustomArgHandler := or $hasRequiredArgs $hasDifferentArgsWithJsonFlag -}}

{{- $atleastOneArgumentWithDescription := false -}}
@@ -206,12 +215,12 @@ func new{{.PascalName}}() *cobra.Command {
cmd.Args = func(cmd *cobra.Command, args []string) error {
{{- if $hasDifferentArgsWithJsonFlag }}
if cmd.Flags().Changed("json") {
err := root.ExactArgs({{len .Request.RequiredPathFields}})(cmd, args)
err := root.ExactArgs({{len $request.RequiredPathFields}})(cmd, args)
if err != nil {
{{- if eq 0 (len .Request.RequiredPathFields) }}
return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide{{- range $index, $field := .Request.RequiredFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input")
{{- if eq 0 (len $request.RequiredPathFields) }}
return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide{{- range $index, $field := $request.RequiredFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input")
{{- else }}
return fmt.Errorf("when --json flag is specified, provide only{{- range $index, $field := .Request.RequiredPathFields}}{{if $index}},{{end}} {{$field.ConstantName}}{{end}} as positional arguments. Provide{{- range $index, $field := .Request.RequiredRequestBodyFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input")
return fmt.Errorf("when --json flag is specified, provide only{{- range $index, $field := $request.RequiredPathFields}}{{if $index}},{{end}} {{$field.ConstantName}}{{end}} as positional arguments. Provide{{- range $index, $field := $request.RequiredRequestBodyFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input")
{{- end }}
}
return nil
@@ -232,7 +241,7 @@ func new{{.PascalName}}() *cobra.Command {
{{- if .Request }}
{{ if .CanUseJson }}
if cmd.Flags().Changed("json") {
diags := {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req)
diags := {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req{{ if .RequestBodyField }}.{{.RequestBodyField.PascalName}}{{ end }})
if diags.HasError() {
return diags.Error()
}
@@ -251,20 +260,20 @@ func new{{.PascalName}}() *cobra.Command {
{{- if $hasIdPrompt}}
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No{{range .Request.RequiredFields}} {{.ConstantName}}{{end}} argument specified. Loading names for {{.Service.TitleName}} drop-down."
promptSpinner <- "No{{range $request.RequiredFields}} {{.ConstantName}}{{end}} argument specified. Loading names for {{.Service.TitleName}} drop-down."
names, err := {{if .Service.IsAccounts}}a{{else}}w{{end}}.{{(.Service.TrimPrefix "account").PascalName}}.{{.Service.List.NamedIdMap.PascalName}}(ctx{{if .Service.List.Request}}, {{.Service.Package.Name}}.{{.Service.List.Request.PascalName}}{}{{end}})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for {{.Service.TitleName}} drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "{{range .Request.RequiredFields}}{{.Summary | trimSuffix "."}}{{end}}")
id, err := cmdio.Select(ctx, names, "{{range $request.RequiredFields}}{{.Summary | trimSuffix "."}}{{end}}")
if err != nil {
return err
}
args = append(args, id)
}
if len(args) != 1 {
return fmt.Errorf("expected to have {{range .Request.RequiredFields}}{{.Summary | trimSuffix "." | lower}}{{end}}")
return fmt.Errorf("expected to have {{range $request.RequiredFields}}{{.Summary | trimSuffix "." | lower}}{{end}}")
}
{{- end -}}
@@ -388,13 +397,19 @@ func new{{.PascalName}}() *cobra.Command {
if !cmd.Flags().Changed("json") {
{{- end }}
{{if not $field.Entity.IsString -}}
_, err = fmt.Sscan(args[{{$arg}}], &{{$method.CamelName}}Req.{{$field.PascalName}})
_, err = fmt.Sscan(args[{{$arg}}], &{{- template "request-body-obj" (dict "Method" $method "Field" $field)}})
if err != nil {
return fmt.Errorf("invalid {{$field.ConstantName}}: %s", args[{{$arg}}])
}{{else -}}
{{$method.CamelName}}Req.{{$field.PascalName}} = args[{{$arg}}]
{{- template "request-body-obj" (dict "Method" $method "Field" $field)}} = args[{{$arg}}]
{{- end -}}
{{- if $optionalIfJsonIsUsed }}
}
{{- end }}
{{- end -}}

{{- define "request-body-obj" -}}
{{- $method := .Method -}}
{{- $field := .Field -}}
{{$method.CamelName}}Req{{ if (and $method.RequestBodyField (not $field.IsPath)) }}.{{$method.RequestBodyField.PascalName}}{{end}}.{{$field.PascalName}}
{{- end -}}
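As an editorial aside, here is a minimal, hedged sketch of the kind of generated code the new request-body-obj helper produces for a method whose request wraps its body in a dedicated field. The command name, request and field types below are illustrative stand-ins, not code from this commit.

```go
package main

import (
	"github.com/spf13/cobra"
)

// Illustrative request shapes standing in for generated SDK types.
type UpdateDashboardRequest struct {
	DashboardId string
	Dashboard   *DashboardBody
}

type DashboardBody struct {
	DisplayName string
}

// Hypothetical generated command body for an "update" method whose request
// wraps its body in a "Dashboard" field. Optional body fields are bound via
// updateReq.Dashboard.<Field> (what request-body-obj expands to), while path
// fields are still set on the outer request struct from positional arguments.
func newUpdate() *cobra.Command {
	cmd := &cobra.Command{Use: "update DASHBOARD_ID"}

	var updateReq UpdateDashboardRequest
	updateReq.Dashboard = &DashboardBody{}

	cmd.Flags().StringVar(&updateReq.Dashboard.DisplayName, "display-name", updateReq.Dashboard.DisplayName, `The display name of the dashboard.`)

	cmd.RunE = func(cmd *cobra.Command, args []string) error {
		updateReq.DashboardId = args[0]
		return nil
	}
	return cmd
}

func main() { _ = newUpdate() }
```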
@ -30,13 +30,14 @@ cmd/account/users/users.go linguist-generated=true
|
|||
cmd/account/vpc-endpoints/vpc-endpoints.go linguist-generated=true
|
||||
cmd/account/workspace-assignment/workspace-assignment.go linguist-generated=true
|
||||
cmd/account/workspaces/workspaces.go linguist-generated=true
|
||||
cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go linguist-generated=true
|
||||
cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go linguist-generated=true
|
||||
cmd/workspace/alerts-legacy/alerts-legacy.go linguist-generated=true
|
||||
cmd/workspace/alerts/alerts.go linguist-generated=true
|
||||
cmd/workspace/apps/apps.go linguist-generated=true
|
||||
cmd/workspace/artifact-allowlists/artifact-allowlists.go linguist-generated=true
|
||||
cmd/workspace/automatic-cluster-update/automatic-cluster-update.go linguist-generated=true
|
||||
cmd/workspace/catalogs/catalogs.go linguist-generated=true
|
||||
cmd/workspace/clean-rooms/clean-rooms.go linguist-generated=true
|
||||
cmd/workspace/cluster-policies/cluster-policies.go linguist-generated=true
|
||||
cmd/workspace/clusters/clusters.go linguist-generated=true
|
||||
cmd/workspace/cmd.go linguist-generated=true
|
||||
|
@ -48,12 +49,14 @@ cmd/workspace/consumer-listings/consumer-listings.go linguist-generated=true
|
|||
cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go linguist-generated=true
|
||||
cmd/workspace/consumer-providers/consumer-providers.go linguist-generated=true
|
||||
cmd/workspace/credentials-manager/credentials-manager.go linguist-generated=true
|
||||
cmd/workspace/credentials/credentials.go linguist-generated=true
|
||||
cmd/workspace/current-user/current-user.go linguist-generated=true
|
||||
cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true
|
||||
cmd/workspace/dashboards/dashboards.go linguist-generated=true
|
||||
cmd/workspace/data-sources/data-sources.go linguist-generated=true
|
||||
cmd/workspace/default-namespace/default-namespace.go linguist-generated=true
|
||||
cmd/workspace/disable-legacy-access/disable-legacy-access.go linguist-generated=true
|
||||
cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs.go linguist-generated=true
|
||||
cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go linguist-generated=true
|
||||
cmd/workspace/experiments/experiments.go linguist-generated=true
|
||||
cmd/workspace/external-locations/external-locations.go linguist-generated=true
|
||||
|
|
|
@ -0,0 +1,56 @@
|
|||
name: PR Comment
|
||||
|
||||
# WARNING:
|
||||
# THIS WORKFLOW ALWAYS RUNS FOR EXTERNAL CONTRIBUTORS WITHOUT ANY APPROVAL.
|
||||
# THIS WORKFLOW RUNS FROM MAIN BRANCH, NOT FROM THE PR BRANCH.
|
||||
# DO NOT PULL THE PR OR EXECUTE ANY CODE FROM THE PR.
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
types: [opened, reopened, synchronize]
|
||||
branches:
|
||||
- main
|
||||
|
||||
jobs:
|
||||
comment-on-pr:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
pull-requests: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Delete old comments
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
# Delete previous comment if it exists
|
||||
previous_comment_ids=$(gh api "repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/comments" \
|
||||
--jq '.[] | select(.body | startswith("<!-- INTEGRATION_TESTS_MANUAL -->")) | .id')
|
||||
echo "Previous comment IDs: $previous_comment_ids"
|
||||
# Iterate over each comment ID and delete the comment
|
||||
if [ ! -z "$previous_comment_ids" ]; then
|
||||
echo "$previous_comment_ids" | while read -r comment_id; do
|
||||
echo "Deleting comment with ID: $comment_id"
|
||||
gh api "repos/${{ github.repository }}/issues/comments/$comment_id" -X DELETE
|
||||
done
|
||||
fi
|
||||
|
||||
- name: Comment on PR
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
COMMIT_SHA: ${{ github.event.pull_request.head.sha }}
|
||||
run: |
|
||||
gh pr comment ${{ github.event.pull_request.number }} --body \
|
||||
"<!-- INTEGRATION_TESTS_MANUAL -->
|
||||
If integration tests don't run automatically, an authorized user can run them manually by following the instructions below:
|
||||
|
||||
Trigger:
|
||||
[go/deco-tests-run/cli](https://go/deco-tests-run/cli)
|
||||
|
||||
Inputs:
|
||||
* PR number: ${{github.event.pull_request.number}}
|
||||
* Commit SHA: \`${{ env.COMMIT_SHA }}\`
|
||||
|
||||
Checks will be approved automatically on success.
|
||||
"
|
|
@ -0,0 +1,78 @@
|
|||
name: integration
|
||||
|
||||
on:
|
||||
|
||||
pull_request:
|
||||
types: [opened, synchronize]
|
||||
|
||||
merge_group:
|
||||
|
||||
|
||||
jobs:
|
||||
check-token:
|
||||
runs-on: ubuntu-latest
|
||||
environment: "test-trigger-is"
|
||||
outputs:
|
||||
has_token: ${{ steps.set-token-status.outputs.has_token }}
|
||||
steps:
|
||||
- name: Check if DECO_WORKFLOW_TRIGGER_APP_ID is set
|
||||
id: set-token-status
|
||||
run: |
|
||||
if [ -z "${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}" ]; then
|
||||
echo "DECO_WORKFLOW_TRIGGER_APP_ID is empty. User has no access to secrets."
|
||||
echo "::set-output name=has_token::false"
|
||||
else
|
||||
echo "DECO_WORKFLOW_TRIGGER_APP_ID is set. User has access to secrets."
|
||||
echo "::set-output name=has_token::true"
|
||||
fi
|
||||
|
||||
trigger-tests:
|
||||
runs-on: ubuntu-latest
|
||||
needs: check-token
|
||||
if: github.event_name == 'pull_request' && needs.check-token.outputs.has_token == 'true'
|
||||
environment: "test-trigger-is"
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Generate GitHub App Token
|
||||
id: generate-token
|
||||
uses: actions/create-github-app-token@v1
|
||||
with:
|
||||
app-id: ${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}
|
||||
private-key: ${{ secrets.DECO_WORKFLOW_TRIGGER_PRIVATE_KEY }}
|
||||
owner: ${{ secrets.ORG_NAME }}
|
||||
repositories: ${{secrets.REPO_NAME}}
|
||||
|
||||
- name: Trigger Workflow in Another Repo
|
||||
env:
|
||||
GH_TOKEN: ${{ steps.generate-token.outputs.token }}
|
||||
run: |
|
||||
gh workflow run cli-isolated-pr.yml -R ${{ secrets.ORG_NAME }}/${{secrets.REPO_NAME}} \
|
||||
--ref main \
|
||||
-f pull_request_number=${{ github.event.pull_request.number }} \
|
||||
-f commit_sha=${{ github.event.pull_request.head.sha }}
|
||||
|
||||
|
||||
|
||||
# Statuses and checks apply to specific commits (by hash).
|
||||
# Enforcement of required checks is done both at the PR level and the merge queue level.
|
||||
# In case of multiple commits in a single PR, the hash of the squashed commit
|
||||
# will not match the one for the latest (approved) commit in the PR.
|
||||
# We auto approve the check for the merge queue for two reasons:
|
||||
# * Queue times out due to duration of tests.
|
||||
# * Avoid running integration tests twice, since it was already run at the tip of the branch before squashing.
|
||||
auto-approve:
|
||||
if: github.event_name == 'merge_group'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Mark Check
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
shell: bash
|
||||
run: |
|
||||
gh api -X POST -H "Accept: application/vnd.github+json" \
|
||||
-H "X-GitHub-Api-Version: 2022-11-28" \
|
||||
/repos/${{ github.repository }}/statuses/${{ github.sha }} \
|
||||
-f 'state=success' \
|
||||
-f 'context=Integration Tests Check'
|
|
@ -33,7 +33,7 @@ jobs:
|
|||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 1.22.7
|
||||
go-version: 1.23.2
|
||||
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v5
|
||||
|
@ -68,7 +68,7 @@ jobs:
|
|||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 1.22.7
|
||||
go-version: 1.23.2
|
||||
|
||||
# No need to download cached dependencies when running gofmt.
|
||||
cache: false
|
||||
|
@ -100,7 +100,7 @@ jobs:
|
|||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 1.22.7
|
||||
go-version: 1.23.2
|
||||
|
||||
# Github repo: https://github.com/ajv-validator/ajv-cli
|
||||
- name: Install ajv-cli
|
||||
|
|
|
@ -6,6 +6,15 @@ on:
|
|||
- "main"
|
||||
- "demo-*"
|
||||
|
||||
# Confirm that snapshot builds work if this file is modified.
|
||||
pull_request:
|
||||
types:
|
||||
- opened
|
||||
- synchronize
|
||||
- reopened
|
||||
paths:
|
||||
- ".github/workflows/release-snapshot.yml"
|
||||
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
|
@ -21,7 +30,7 @@ jobs:
|
|||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 1.22.7
|
||||
go-version: 1.23.2
|
||||
|
||||
# The default cache key for this action considers only the `go.sum` file.
|
||||
# We include .goreleaser.yaml here to differentiate from the cache used by the push action
|
||||
|
|
|
@ -22,7 +22,7 @@ jobs:
|
|||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 1.22.7
|
||||
go-version: 1.23.2
|
||||
|
||||
# The default cache key for this action considers only the `go.sum` file.
|
||||
# We include .goreleaser.yaml here to differentiate from the cache used by the push action
|
||||
|
@ -63,7 +63,7 @@ jobs:
|
|||
echo "VERSION=${VERSION:1}" >> $GITHUB_ENV
|
||||
|
||||
- name: Update setup-cli
|
||||
uses: actions/github-script@v6
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
github-token: ${{ secrets.DECO_GITHUB_TOKEN }}
|
||||
script: |
|
||||
|
@ -87,7 +87,7 @@ jobs:
|
|||
echo "VERSION=${VERSION:1}" >> $GITHUB_ENV
|
||||
|
||||
- name: Update homebrew-tap
|
||||
uses: actions/github-script@v6
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
github-token: ${{ secrets.DECO_GITHUB_TOKEN }}
|
||||
script: |
|
||||
|
@ -124,7 +124,7 @@ jobs:
|
|||
echo "VERSION=${VERSION:1}" >> $GITHUB_ENV
|
||||
|
||||
- name: Update CLI version in the VSCode extension
|
||||
uses: actions/github-script@v6
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
github-token: ${{ secrets.DECO_GITHUB_TOKEN }}
|
||||
script: |
|
||||
|
|
|
@ -95,7 +95,7 @@ checksum:
|
|||
algorithm: sha256
|
||||
|
||||
snapshot:
|
||||
name_template: '{{ incpatch .Version }}-dev+{{ .ShortCommit }}'
|
||||
version_template: '{{ incpatch .Version }}-dev+{{ .ShortCommit }}'
|
||||
|
||||
changelog:
|
||||
sort: asc
|
||||
|
|
114 CHANGELOG.md
|
@ -1,5 +1,119 @@
|
|||
# Version changelog
|
||||
|
||||
## [Release] Release v0.234.0
|
||||
|
||||
Bundles:
|
||||
* Do not execute build on bundle destroy ([#1882](https://github.com/databricks/cli/pull/1882)).
|
||||
* Add support for non-Python ipynb notebooks to DABs ([#1827](https://github.com/databricks/cli/pull/1827)).
|
||||
|
||||
API Changes:
|
||||
* Added `databricks credentials` command group.
|
||||
* Changed `databricks lakeview create` command with new required argument order.
|
||||
|
||||
OpenAPI commit d25296d2f4aa7bd6195c816fdf82e0f960f775da (2024-11-07)
|
||||
Dependency updates:
|
||||
* Upgrade TF provider to 1.58.0 ([#1900](https://github.com/databricks/cli/pull/1900)).
|
||||
* Bump golang.org/x/sync from 0.8.0 to 0.9.0 ([#1892](https://github.com/databricks/cli/pull/1892)).
|
||||
* Bump golang.org/x/text from 0.19.0 to 0.20.0 ([#1893](https://github.com/databricks/cli/pull/1893)).
|
||||
* Bump golang.org/x/mod from 0.21.0 to 0.22.0 ([#1895](https://github.com/databricks/cli/pull/1895)).
|
||||
* Bump golang.org/x/oauth2 from 0.23.0 to 0.24.0 ([#1894](https://github.com/databricks/cli/pull/1894)).
|
||||
* Bump github.com/databricks/databricks-sdk-go from 0.49.0 to 0.51.0 ([#1878](https://github.com/databricks/cli/pull/1878)).
|
||||
|
||||
## [Release] Release v0.233.0
|
||||
|
||||
CLI:
|
||||
* Clean host URL in the `auth login` command ([#1879](https://github.com/databricks/cli/pull/1879)).
|
||||
|
||||
Bundles:
|
||||
* Fix bundle run when run interactively ([#1880](https://github.com/databricks/cli/pull/1880)).
|
||||
* Fix relative path resolution for dashboards on Windows ([#1881](https://github.com/databricks/cli/pull/1881)).
|
||||
|
||||
Internal:
|
||||
* Address goreleaser deprecation warning ([#1872](https://github.com/databricks/cli/pull/1872)).
|
||||
* Update actions/github-script to v7 ([#1873](https://github.com/databricks/cli/pull/1873)).
|
||||
* Use Go 1.23 ([#1871](https://github.com/databricks/cli/pull/1871)).
|
||||
* [Internal] Always write message for manual integration test trigger ([#1874](https://github.com/databricks/cli/pull/1874)).
|
||||
* Add `cmd-exec-id` to user agent ([#1808](https://github.com/databricks/cli/pull/1808)).
|
||||
* Added E2E test to run Python wheels on interactive cluster created in bundle ([#1864](https://github.com/databricks/cli/pull/1864)).
|
||||
|
||||
|
||||
Dependency updates:
|
||||
* Bump github.com/hashicorp/terraform-json from 0.22.1 to 0.23.0 ([#1877](https://github.com/databricks/cli/pull/1877)).
|
||||
|
||||
## [Release] Release v0.232.1
|
||||
|
||||
This patch release fixes the following error observed when deploying to the /Shared root folder:
|
||||
"Error: Path (/Shared/.bundle/.../resources) doesn't exist"
|
||||
|
||||
Bundles:
|
||||
* Fixed adding /Workspace prefix for resource paths ([#1866](https://github.com/databricks/cli/pull/1866)).
|
||||
|
||||
|
||||
## [Release] Release v0.232.0
|
||||
|
||||
**New features for Databricks Asset Bundles:**
|
||||
|
||||
This release adds support for managing AI/BI dashboards as part of your bundle configuration. The `bundle generate` command is updated to support producing dashboard bundle configuration as well as a serialized JSON representation of the dashboard.
|
||||
You can find an example configuration and walkthrough at https://github.com/databricks/bundle-examples/tree/main/knowledge_base/dashboard_nyc_taxi
|
||||
|
||||
CLI:
|
||||
* Add privacy notice to README ([#1841](https://github.com/databricks/cli/pull/1841)).
|
||||
|
||||
Bundles:
|
||||
* Add support for AI/BI dashboards ([#1743](https://github.com/databricks/cli/pull/1743)).
|
||||
* Added validator for folder permissions ([#1824](https://github.com/databricks/cli/pull/1824)).
|
||||
* Add bundle generate variant for dashboards ([#1847](https://github.com/databricks/cli/pull/1847)).
|
||||
* Use SetPermissions instead of UpdatePermissions when setting folder permissions based on top-level ones ([#1822](https://github.com/databricks/cli/pull/1822)).
|
||||
|
||||
Internal:
|
||||
* Attempt to reduce test flakiness on Windows ([#1845](https://github.com/databricks/cli/pull/1845)).
|
||||
* Reuse resource resolution code for the run command ([#1858](https://github.com/databricks/cli/pull/1858)).
|
||||
* [Internal] Automatically trigger integration tests on PR ([#1857](https://github.com/databricks/cli/pull/1857)).
|
||||
* [Internal] Add test instructions for external contributors ([#1863](https://github.com/databricks/cli/pull/1863)).
|
||||
* Add `libs/dyn/jsonsaver` ([#1862](https://github.com/databricks/cli/pull/1862)).
|
||||
|
||||
|
||||
Dependency updates:
|
||||
* Bump github.com/fatih/color from 1.17.0 to 1.18.0 ([#1861](https://github.com/databricks/cli/pull/1861)).
|
||||
|
||||
## [Release] Release v0.231.0
|
||||
|
||||
CLI:
|
||||
* Added JSON input validation for CLI commands ([#1771](https://github.com/databricks/cli/pull/1771)).
|
||||
* Support Git worktrees for `sync` ([#1831](https://github.com/databricks/cli/pull/1831)).
|
||||
|
||||
Bundles:
|
||||
* Add `bundle summary` to display URLs for deployed resources ([#1731](https://github.com/databricks/cli/pull/1731)).
|
||||
* Added a warning when incorrect permissions used for `/Workspace/Shared` bundle root ([#1821](https://github.com/databricks/cli/pull/1821)).
|
||||
* Show actionable errors for collaborative deployment scenarios ([#1386](https://github.com/databricks/cli/pull/1386)).
|
||||
* Fix path to repository-wide exclude file ([#1837](https://github.com/databricks/cli/pull/1837)).
|
||||
* Fixed typo in converting cluster permissions ([#1826](https://github.com/databricks/cli/pull/1826)).
|
||||
* Ignore metastore permission error during template generation ([#1819](https://github.com/databricks/cli/pull/1819)).
|
||||
* Handle normalization of `dyn.KindTime` into an any type ([#1836](https://github.com/databricks/cli/pull/1836)).
|
||||
* Added support for pip options in environment dependencies ([#1842](https://github.com/databricks/cli/pull/1842)).
|
||||
* Fix race condition when restarting continuous jobs ([#1849](https://github.com/databricks/cli/pull/1849)).
|
||||
* Fix pipeline in default-python template not working for certain workspaces ([#1854](https://github.com/databricks/cli/pull/1854)).
|
||||
* Add "output" flag to the bundle sync command ([#1853](https://github.com/databricks/cli/pull/1853)).
|
||||
|
||||
Internal:
|
||||
* Move utility functions dealing with IAM to libs/iamutil ([#1820](https://github.com/databricks/cli/pull/1820)).
|
||||
* Remove unused `IS_OWNER` constant ([#1823](https://github.com/databricks/cli/pull/1823)).
|
||||
* Assert SDK version is consistent in the CLI generation process ([#1814](https://github.com/databricks/cli/pull/1814)).
|
||||
* Fixed unmarshalling json input into `interface{}` type ([#1832](https://github.com/databricks/cli/pull/1832)).
|
||||
* Fix `TestAccFsMkdirWhenFileExistsAtPath` in isolated Azure environments ([#1833](https://github.com/databricks/cli/pull/1833)).
|
||||
* Add behavioral tests for examples from the YAML spec ([#1835](https://github.com/databricks/cli/pull/1835)).
|
||||
* Remove Terraform conversion function that's no longer used ([#1840](https://github.com/databricks/cli/pull/1840)).
|
||||
* Encode assumptions about the dashboards API in a test ([#1839](https://github.com/databricks/cli/pull/1839)).
|
||||
* Add script to make testing of code on branches easier ([#1844](https://github.com/databricks/cli/pull/1844)).
|
||||
|
||||
API Changes:
|
||||
* Added `databricks disable-legacy-dbfs` command group.
|
||||
|
||||
OpenAPI commit cf9c61453990df0f9453670f2fe68e1b128647a2 (2024-10-14)
|
||||
Dependency updates:
|
||||
* Upgrade TF provider to 1.54.0 ([#1852](https://github.com/databricks/cli/pull/1852)).
|
||||
* Bump github.com/databricks/databricks-sdk-go from 0.48.0 to 0.49.0 ([#1843](https://github.com/databricks/cli/pull/1843)).
|
||||
|
||||
## [Release] Release v0.230.0
|
||||
|
||||
Notable changes for Databricks Asset Bundles:
|
||||
|
|
|
@@ -35,3 +35,6 @@ docker run -e DATABRICKS_HOST=$YOUR_HOST_URL -e DATABRICKS_TOKEN=$YOUR_TOKEN ghc
This CLI follows the Databricks Unified Authentication principles.

You can find a detailed description at https://github.com/databricks/databricks-sdk-go#authentication.

## Privacy Notice
Databricks CLI use is subject to the [Databricks License](https://github.com/databricks/cli/blob/main/LICENSE) and [Databricks Privacy Notice](https://www.databricks.com/legal/privacynotice), including any Usage Data provisions.
@@ -0,0 +1,18 @@
package generate

import (
	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
)

func ConvertDashboardToValue(dashboard *dashboards.Dashboard, filePath string) (dyn.Value, error) {
	// The majority of fields of the dashboard struct are read-only.
	// We copy the relevant fields manually.
	dv := map[string]dyn.Value{
		"display_name": dyn.NewValue(dashboard.DisplayName, []dyn.Location{{Line: 1}}),
		"warehouse_id": dyn.NewValue(dashboard.WarehouseId, []dyn.Location{{Line: 2}}),
		"file_path":    dyn.NewValue(filePath, []dyn.Location{{Line: 3}}),
	}

	return dyn.V(dv), nil
}
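For illustration, a minimal sketch of how this converter might be driven when generating bundle configuration for an existing dashboard. The import path of the generate package, the dashboard ID, and the output file path are assumptions; only ConvertDashboardToValue and the Lakeview Get call are taken from this diff.

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/cli/bundle/config/generate" // assumed package location
	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// Fetch an existing dashboard (ID is illustrative).
	dashboard, err := w.Lakeview.Get(ctx, dashboards.GetDashboardRequest{
		DashboardId: "01ef8d56871e1d50ae30ce7375e42478",
	})
	if err != nil {
		panic(err)
	}

	// Convert the read-only SDK struct into a dyn.Value that carries only the
	// fields relevant to bundle configuration: display_name, warehouse_id, file_path.
	v, err := generate.ConvertDashboardToValue(dashboard, "src/my_dashboard.lvdash.json")
	if err != nil {
		panic(err)
	}
	fmt.Println(v)
}
```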
@@ -212,6 +212,15 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos
		}
	}

	// Dashboards: Prefix
	for key, dashboard := range r.Dashboards {
		if dashboard == nil || dashboard.Dashboard == nil {
			diags = diags.Extend(diag.Errorf("dashboard %s is not defined", key))
			continue
		}
		dashboard.DisplayName = prefix + dashboard.DisplayName
	}

	return diags
}
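For illustration, a minimal sketch of the effect of this prefixing on a dashboard resource; the prefix value mirrors the development-mode test later in this diff, and the standalone setup around it is assumed.

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/bundle/config/resources"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
)

func main() {
	// Prefix value as used by the development-mode preset in the tests below.
	prefix := "[dev lennart] "

	d := &resources.Dashboard{
		Dashboard: &dashboards.Dashboard{DisplayName: "dashboard1"},
	}
	d.DisplayName = prefix + d.DisplayName

	fmt.Println(d.DisplayName) // [dev lennart] dashboard1
}
```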
@ -0,0 +1,70 @@
|
|||
package mutator
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/cli/libs/dyn"
|
||||
)
|
||||
|
||||
type configureDashboardDefaults struct{}
|
||||
|
||||
func ConfigureDashboardDefaults() bundle.Mutator {
|
||||
return &configureDashboardDefaults{}
|
||||
}
|
||||
|
||||
func (m *configureDashboardDefaults) Name() string {
|
||||
return "ConfigureDashboardDefaults"
|
||||
}
|
||||
|
||||
func (m *configureDashboardDefaults) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
var diags diag.Diagnostics
|
||||
|
||||
pattern := dyn.NewPattern(
|
||||
dyn.Key("resources"),
|
||||
dyn.Key("dashboards"),
|
||||
dyn.AnyKey(),
|
||||
)
|
||||
|
||||
// Configure defaults for all dashboards.
|
||||
err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
|
||||
return dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
|
||||
var err error
|
||||
v, err = setIfNotExists(v, dyn.NewPath(dyn.Key("parent_path")), dyn.V(b.Config.Workspace.ResourcePath))
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, err
|
||||
}
|
||||
v, err = setIfNotExists(v, dyn.NewPath(dyn.Key("embed_credentials")), dyn.V(false))
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, err
|
||||
}
|
||||
return v, nil
|
||||
})
|
||||
})
|
||||
|
||||
diags = diags.Extend(diag.FromErr(err))
|
||||
return diags
|
||||
}
|
||||
|
||||
func setIfNotExists(v dyn.Value, path dyn.Path, defaultValue dyn.Value) (dyn.Value, error) {
|
||||
// Get the field at the specified path (if set).
|
||||
_, err := dyn.GetByPath(v, path)
|
||||
switch {
|
||||
case dyn.IsNoSuchKeyError(err):
|
||||
// OK, we'll set the default value.
|
||||
break
|
||||
case dyn.IsCannotTraverseNilError(err):
|
||||
// Cannot traverse the value, skip it.
|
||||
return v, nil
|
||||
case err == nil:
|
||||
// The field is set, skip it.
|
||||
return v, nil
|
||||
default:
|
||||
// Return the error.
|
||||
return v, err
|
||||
}
|
||||
|
||||
// Set the field at the specified path.
|
||||
return dyn.SetByPath(v, path, defaultValue)
|
||||
}
|
|
@ -0,0 +1,130 @@
|
|||
package mutator_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/mutator"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/databricks/cli/bundle/internal/bundletest"
|
||||
"github.com/databricks/cli/libs/dyn"
|
||||
"github.com/databricks/databricks-sdk-go/service/dashboards"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestConfigureDashboardDefaultsParentPath(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
ResourcePath: "/foo/bar",
|
||||
},
|
||||
Resources: config.Resources{
|
||||
Dashboards: map[string]*resources.Dashboard{
|
||||
"d1": {
|
||||
// Empty string is skipped.
|
||||
// See below for how it is set.
|
||||
Dashboard: &dashboards.Dashboard{
|
||||
ParentPath: "",
|
||||
},
|
||||
},
|
||||
"d2": {
|
||||
// Non-empty string is skipped.
|
||||
Dashboard: &dashboards.Dashboard{
|
||||
ParentPath: "already-set",
|
||||
},
|
||||
},
|
||||
"d3": {
|
||||
// No parent path set.
|
||||
},
|
||||
"d4": nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// We can't set an empty string in the typed configuration.
|
||||
// Do it on the dyn.Value directly.
|
||||
bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
|
||||
return dyn.Set(v, "resources.dashboards.d1.parent_path", dyn.V(""))
|
||||
})
|
||||
|
||||
diags := bundle.Apply(context.Background(), b, mutator.ConfigureDashboardDefaults())
|
||||
require.NoError(t, diags.Error())
|
||||
|
||||
var v dyn.Value
|
||||
var err error
|
||||
|
||||
// Set to empty string; unchanged.
|
||||
v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d1.parent_path")
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, "", v.MustString())
|
||||
}
|
||||
|
||||
// Set to "already-set"; unchanged.
|
||||
v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d2.parent_path")
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, "already-set", v.MustString())
|
||||
}
|
||||
|
||||
// Not set; now set to the workspace resource path.
|
||||
v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d3.parent_path")
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, "/foo/bar", v.MustString())
|
||||
}
|
||||
|
||||
// No valid dashboard; no change.
|
||||
_, err = dyn.Get(b.Config.Value(), "resources.dashboards.d4.parent_path")
|
||||
assert.True(t, dyn.IsCannotTraverseNilError(err))
|
||||
}
|
||||
|
||||
func TestConfigureDashboardDefaultsEmbedCredentials(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Dashboards: map[string]*resources.Dashboard{
|
||||
"d1": {
|
||||
EmbedCredentials: true,
|
||||
},
|
||||
"d2": {
|
||||
EmbedCredentials: false,
|
||||
},
|
||||
"d3": {
|
||||
// No parent path set.
|
||||
},
|
||||
"d4": nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
diags := bundle.Apply(context.Background(), b, mutator.ConfigureDashboardDefaults())
|
||||
require.NoError(t, diags.Error())
|
||||
|
||||
var v dyn.Value
|
||||
var err error
|
||||
|
||||
// Set to true; still true.
|
||||
v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d1.embed_credentials")
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, true, v.MustBool())
|
||||
}
|
||||
|
||||
// Set to false; still false.
|
||||
v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d2.embed_credentials")
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, false, v.MustBool())
|
||||
}
|
||||
|
||||
// Not set; now false.
|
||||
v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d3.embed_credentials")
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, false, v.MustBool())
|
||||
}
|
||||
|
||||
// No valid dashboard; no change.
|
||||
_, err = dyn.Get(b.Config.Value(), "resources.dashboards.d4.embed_credentials")
|
||||
assert.True(t, dyn.IsCannotTraverseNilError(err))
|
||||
}
|
|
@ -5,14 +5,12 @@ import (
|
|||
"strings"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/libs/dbr"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/cli/libs/env"
|
||||
"github.com/databricks/cli/libs/filer"
|
||||
"github.com/databricks/cli/libs/vfs"
|
||||
)
|
||||
|
||||
const envDatabricksRuntimeVersion = "DATABRICKS_RUNTIME_VERSION"
|
||||
|
||||
type configureWSFS struct{}
|
||||
|
||||
func ConfigureWSFS() bundle.Mutator {
|
||||
|
@ -32,7 +30,7 @@ func (m *configureWSFS) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno
|
|||
}
|
||||
|
||||
// The executable must be running on DBR.
|
||||
if _, ok := env.Lookup(ctx, envDatabricksRuntimeVersion); !ok {
|
||||
if !dbr.RunsOnRuntime(ctx) {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,65 @@
|
|||
package mutator_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config/mutator"
|
||||
"github.com/databricks/cli/libs/dbr"
|
||||
"github.com/databricks/cli/libs/vfs"
|
||||
"github.com/databricks/databricks-sdk-go/config"
|
||||
"github.com/databricks/databricks-sdk-go/experimental/mocks"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func mockBundleForConfigureWSFS(t *testing.T, syncRootPath string) *bundle.Bundle {
|
||||
// The native path of the sync root on Windows will never match the /Workspace prefix,
|
||||
// so the test case for nominal behavior will always fail.
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("this test is not applicable on Windows")
|
||||
}
|
||||
|
||||
b := &bundle.Bundle{
|
||||
SyncRoot: vfs.MustNew(syncRootPath),
|
||||
}
|
||||
|
||||
w := mocks.NewMockWorkspaceClient(t)
|
||||
w.WorkspaceClient.Config = &config.Config{}
|
||||
b.SetWorkpaceClient(w.WorkspaceClient)
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
func TestConfigureWSFS_SkipsIfNotWorkspacePrefix(t *testing.T) {
|
||||
b := mockBundleForConfigureWSFS(t, "/foo")
|
||||
originalSyncRoot := b.SyncRoot
|
||||
|
||||
ctx := context.Background()
|
||||
diags := bundle.Apply(ctx, b, mutator.ConfigureWSFS())
|
||||
assert.Empty(t, diags)
|
||||
assert.Equal(t, originalSyncRoot, b.SyncRoot)
|
||||
}
|
||||
|
||||
func TestConfigureWSFS_SkipsIfNotRunningOnRuntime(t *testing.T) {
|
||||
b := mockBundleForConfigureWSFS(t, "/Workspace/foo")
|
||||
originalSyncRoot := b.SyncRoot
|
||||
|
||||
ctx := context.Background()
|
||||
ctx = dbr.MockRuntime(ctx, false)
|
||||
diags := bundle.Apply(ctx, b, mutator.ConfigureWSFS())
|
||||
assert.Empty(t, diags)
|
||||
assert.Equal(t, originalSyncRoot, b.SyncRoot)
|
||||
}
|
||||
|
||||
func TestConfigureWSFS_SwapSyncRoot(t *testing.T) {
|
||||
b := mockBundleForConfigureWSFS(t, "/Workspace/foo")
|
||||
originalSyncRoot := b.SyncRoot
|
||||
|
||||
ctx := context.Background()
|
||||
ctx = dbr.MockRuntime(ctx, true)
|
||||
diags := bundle.Apply(ctx, b, mutator.ConfigureWSFS())
|
||||
assert.Empty(t, diags)
|
||||
assert.NotEqual(t, originalSyncRoot, b.SyncRoot)
|
||||
}
|
|
@ -8,6 +8,7 @@ import (
|
|||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/databricks/databricks-sdk-go/service/catalog"
|
||||
"github.com/databricks/databricks-sdk-go/service/compute"
|
||||
"github.com/databricks/databricks-sdk-go/service/dashboards"
|
||||
"github.com/databricks/databricks-sdk-go/service/jobs"
|
||||
"github.com/databricks/databricks-sdk-go/service/ml"
|
||||
"github.com/databricks/databricks-sdk-go/service/pipelines"
|
||||
|
@ -64,9 +65,8 @@ func TestInitializeURLs(t *testing.T) {
|
|||
},
|
||||
QualityMonitors: map[string]*resources.QualityMonitor{
|
||||
"qualityMonitor1": {
|
||||
CreateMonitor: &catalog.CreateMonitor{
|
||||
TableName: "catalog.schema.qualityMonitor1",
|
||||
},
|
||||
TableName: "catalog.schema.qualityMonitor1",
|
||||
CreateMonitor: &catalog.CreateMonitor{},
|
||||
},
|
||||
},
|
||||
Schemas: map[string]*resources.Schema{
|
||||
|
@ -85,6 +85,14 @@ func TestInitializeURLs(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
Dashboards: map[string]*resources.Dashboard{
|
||||
"dashboard1": {
|
||||
ID: "01ef8d56871e1d50ae30ce7375e42478",
|
||||
Dashboard: &dashboards.Dashboard{
|
||||
DisplayName: "My special dashboard",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -99,6 +107,7 @@ func TestInitializeURLs(t *testing.T) {
|
|||
"qualityMonitor1": "https://mycompany.databricks.com/explore/data/catalog/schema/qualityMonitor1?o=123456",
|
||||
"schema1": "https://mycompany.databricks.com/explore/data/catalog/schema?o=123456",
|
||||
"cluster1": "https://mycompany.databricks.com/compute/clusters/1017-103929-vlr7jzcf?o=123456",
|
||||
"dashboard1": "https://mycompany.databricks.com/dashboardsv3/01ef8d56871e1d50ae30ce7375e42478/published?o=123456",
|
||||
}
|
||||
|
||||
initializeForWorkspace(b, "123456", "https://mycompany.databricks.com/")
|
||||
|
|
|
@ -32,6 +32,7 @@ func (m *prependWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) di
|
|||
dyn.NewPattern(dyn.Key("workspace"), dyn.Key("file_path")),
|
||||
dyn.NewPattern(dyn.Key("workspace"), dyn.Key("artifact_path")),
|
||||
dyn.NewPattern(dyn.Key("workspace"), dyn.Key("state_path")),
|
||||
dyn.NewPattern(dyn.Key("workspace"), dyn.Key("resource_path")),
|
||||
}
|
||||
|
||||
err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
|
||||
|
@ -43,6 +44,11 @@ func (m *prependWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) di
|
|||
return dyn.InvalidValue, fmt.Errorf("expected string, got %s", v.Kind())
|
||||
}
|
||||
|
||||
// Skip prefixing if the path does not start with /, it might be a variable reference or something else.
|
||||
if !strings.HasPrefix(path, "/") {
|
||||
return pv, nil
|
||||
}
|
||||
|
||||
for _, prefix := range skipPrefixes {
|
||||
if strings.HasPrefix(path, prefix) {
|
||||
return pv, nil
|
||||
|
|
|
@ -31,6 +31,14 @@ func TestPrependWorkspacePrefix(t *testing.T) {
|
|||
path: "/Volumes/Users/test",
|
||||
expected: "/Volumes/Users/test",
|
||||
},
|
||||
{
|
||||
path: "~/test",
|
||||
expected: "~/test",
|
||||
},
|
||||
{
|
||||
path: "${workspace.file_path}/test",
|
||||
expected: "${workspace.file_path}/test",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
|
@ -41,6 +49,7 @@ func TestPrependWorkspacePrefix(t *testing.T) {
|
|||
ArtifactPath: tc.path,
|
||||
FilePath: tc.path,
|
||||
StatePath: tc.path,
|
||||
ResourcePath: tc.path,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -51,6 +60,7 @@ func TestPrependWorkspacePrefix(t *testing.T) {
|
|||
require.Equal(t, tc.expected, b.Config.Workspace.ArtifactPath)
|
||||
require.Equal(t, tc.expected, b.Config.Workspace.FilePath)
|
||||
require.Equal(t, tc.expected, b.Config.Workspace.StatePath)
|
||||
require.Equal(t, tc.expected, b.Config.Workspace.ResourcePath)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -76,4 +86,5 @@ func TestPrependWorkspaceForDefaultConfig(t *testing.T) {
|
|||
require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/artifacts", b.Config.Workspace.ArtifactPath)
|
||||
require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/files", b.Config.Workspace.FilePath)
|
||||
require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/state", b.Config.Workspace.StatePath)
|
||||
require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/resources", b.Config.Workspace.ResourcePath)
|
||||
}
|
||||
|
|
|
@ -14,6 +14,7 @@ import (
|
|||
sdkconfig "github.com/databricks/databricks-sdk-go/config"
|
||||
"github.com/databricks/databricks-sdk-go/service/catalog"
|
||||
"github.com/databricks/databricks-sdk-go/service/compute"
|
||||
"github.com/databricks/databricks-sdk-go/service/dashboards"
|
||||
"github.com/databricks/databricks-sdk-go/service/iam"
|
||||
"github.com/databricks/databricks-sdk-go/service/jobs"
|
||||
"github.com/databricks/databricks-sdk-go/service/ml"
|
||||
|
@ -101,16 +102,23 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
|
|||
"registeredmodel1": {CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{Name: "registeredmodel1"}},
|
||||
},
|
||||
QualityMonitors: map[string]*resources.QualityMonitor{
|
||||
"qualityMonitor1": {CreateMonitor: &catalog.CreateMonitor{TableName: "qualityMonitor1"}},
|
||||
"qualityMonitor2": {
|
||||
"qualityMonitor1": {
|
||||
TableName: "qualityMonitor1",
|
||||
CreateMonitor: &catalog.CreateMonitor{
|
||||
TableName: "qualityMonitor2",
|
||||
Schedule: &catalog.MonitorCronSchedule{},
|
||||
OutputSchemaName: "catalog.schema",
|
||||
},
|
||||
},
|
||||
"qualityMonitor2": {
|
||||
TableName: "qualityMonitor2",
|
||||
CreateMonitor: &catalog.CreateMonitor{
|
||||
OutputSchemaName: "catalog.schema",
|
||||
Schedule: &catalog.MonitorCronSchedule{},
|
||||
},
|
||||
},
|
||||
"qualityMonitor3": {
|
||||
TableName: "qualityMonitor3",
|
||||
CreateMonitor: &catalog.CreateMonitor{
|
||||
TableName: "qualityMonitor3",
|
||||
OutputSchemaName: "catalog.schema",
|
||||
Schedule: &catalog.MonitorCronSchedule{
|
||||
PauseStatus: catalog.MonitorCronSchedulePauseStatusUnpaused,
|
||||
},
|
||||
|
@ -123,6 +131,13 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
|
|||
Clusters: map[string]*resources.Cluster{
|
||||
"cluster1": {ClusterSpec: &compute.ClusterSpec{ClusterName: "cluster1", SparkVersion: "13.2.x", NumWorkers: 1}},
|
||||
},
|
||||
Dashboards: map[string]*resources.Dashboard{
|
||||
"dashboard1": {
|
||||
Dashboard: &dashboards.Dashboard{
|
||||
DisplayName: "dashboard1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
// Use AWS implementation for testing.
|
||||
|
@ -184,6 +199,9 @@ func TestProcessTargetModeDevelopment(t *testing.T) {
|
|||
|
||||
// Clusters
|
||||
assert.Equal(t, "[dev lennart] cluster1", b.Config.Resources.Clusters["cluster1"].ClusterName)
|
||||
|
||||
// Dashboards
|
||||
assert.Equal(t, "[dev lennart] dashboard1", b.Config.Resources.Dashboards["dashboard1"].DisplayName)
|
||||
}
|
||||
|
||||
func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) {
|
||||
|
@ -272,6 +290,7 @@ func TestValidateDevelopmentMode(t *testing.T) {
|
|||
b.Config.Workspace.StatePath = "/Users/lennart@company.com/.bundle/x/y/state"
|
||||
b.Config.Workspace.FilePath = "/Users/lennart@company.com/.bundle/x/y/files"
|
||||
b.Config.Workspace.ArtifactPath = "/Users/lennart@company.com/.bundle/x/y/artifacts"
|
||||
b.Config.Workspace.ResourcePath = "/Users/lennart@company.com/.bundle/x/y/resources"
|
||||
diags = validateDevelopmentMode(b)
|
||||
require.NoError(t, diags.Error())
|
||||
}
|
||||
|
@ -300,6 +319,7 @@ func TestProcessTargetModeProduction(t *testing.T) {
|
|||
b.Config.Workspace.StatePath = "/Shared/.bundle/x/y/state"
|
||||
b.Config.Workspace.ArtifactPath = "/Shared/.bundle/x/y/artifacts"
|
||||
b.Config.Workspace.FilePath = "/Shared/.bundle/x/y/files"
|
||||
b.Config.Workspace.ResourcePath = "/Shared/.bundle/x/y/resources"
|
||||
|
||||
diags = validateProductionMode(context.Background(), b, false)
|
||||
require.ErrorContains(t, diags.Error(), "production")
|
||||
|
|
|
@ -110,6 +110,16 @@ func validateRunAs(b *bundle.Bundle) diag.Diagnostics {
|
|||
))
|
||||
}
|
||||
|
||||
// Dashboards do not support run_as in the API.
|
||||
if len(b.Config.Resources.Dashboards) > 0 {
|
||||
diags = diags.Extend(reportRunAsNotSupported(
|
||||
"dashboards",
|
||||
b.Config.GetLocation("resources.dashboards"),
|
||||
b.Config.Workspace.CurrentUser.UserName,
|
||||
identity,
|
||||
))
|
||||
}
|
||||
|
||||
return diags
|
||||
}
|
||||
|
||||
|
|
|
@ -33,6 +33,7 @@ func allResourceTypes(t *testing.T) []string {
|
|||
// also update this check when adding a new resource
|
||||
require.Equal(t, []string{
|
||||
"clusters",
|
||||
"dashboards",
|
||||
"experiments",
|
||||
"jobs",
|
||||
"model_serving_endpoints",
|
||||
|
@ -188,6 +189,7 @@ func TestRunAsErrorForUnsupportedResources(t *testing.T) {
|
|||
Config: *r,
|
||||
}
|
||||
diags := bundle.Apply(context.Background(), b, SetRunAs())
|
||||
require.Error(t, diags.Error())
|
||||
assert.Contains(t, diags.Error().Error(), "do not support a setting a run_as user that is different from the owner.\n"+
|
||||
"Current identity: alice. Run as identity: bob.\n"+
|
||||
"See https://docs.databricks.com/dev-tools/bundles/run-as.html to learn more about the run_as property.", rt)
|
||||
|
|
|
@ -162,6 +162,20 @@ func (t *translateContext) translateNoOp(literal, localFullPath, localRelPath, r
|
|||
return localRelPath, nil
|
||||
}
|
||||
|
||||
func (t *translateContext) retainLocalAbsoluteFilePath(literal, localFullPath, localRelPath, remotePath string) (string, error) {
|
||||
info, err := t.b.SyncRoot.Stat(filepath.ToSlash(localRelPath))
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return "", fmt.Errorf("file %s not found", literal)
|
||||
}
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("unable to determine if %s is a file: %w", localFullPath, err)
|
||||
}
|
||||
if info.IsDir() {
|
||||
return "", fmt.Errorf("expected %s to be a file but found a directory", literal)
|
||||
}
|
||||
return localFullPath, nil
|
||||
}
|
||||
|
||||
func (t *translateContext) translateNoOpWithPrefix(literal, localFullPath, localRelPath, remotePath string) (string, error) {
|
||||
if !strings.HasPrefix(localRelPath, ".") {
|
||||
localRelPath = "." + string(filepath.Separator) + localRelPath
|
||||
|
@ -215,6 +229,7 @@ func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnos
|
|||
t.applyJobTranslations,
|
||||
t.applyPipelineTranslations,
|
||||
t.applyArtifactTranslations,
|
||||
t.applyDashboardTranslations,
|
||||
} {
|
||||
v, err = fn(v)
|
||||
if err != nil {
|
||||
|
|
|
@ -0,0 +1,28 @@
|
|||
package mutator
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/databricks/cli/libs/dyn"
|
||||
)
|
||||
|
||||
func (t *translateContext) applyDashboardTranslations(v dyn.Value) (dyn.Value, error) {
|
||||
// Convert the `file_path` field to a local absolute path.
|
||||
// We load the file at this path and use its contents for the dashboard contents.
|
||||
pattern := dyn.NewPattern(
|
||||
dyn.Key("resources"),
|
||||
dyn.Key("dashboards"),
|
||||
dyn.AnyKey(),
|
||||
dyn.Key("file_path"),
|
||||
)
|
||||
|
||||
return dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
|
||||
key := p[2].Key()
|
||||
dir, err := v.Location().Directory()
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, fmt.Errorf("unable to determine directory for dashboard %s: %w", key, err)
|
||||
}
|
||||
|
||||
return t.rewriteRelativeTo(p, v, t.retainLocalAbsoluteFilePath, dir, "")
|
||||
})
|
||||
}
|
|
@ -0,0 +1,54 @@
|
|||
package mutator_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/mutator"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/databricks/cli/bundle/internal/bundletest"
|
||||
"github.com/databricks/cli/libs/dyn"
|
||||
"github.com/databricks/cli/libs/vfs"
|
||||
"github.com/databricks/databricks-sdk-go/service/dashboards"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestTranslatePathsDashboards_FilePathRelativeSubDirectory(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
touchEmptyFile(t, filepath.Join(dir, "src", "my_dashboard.lvdash.json"))
|
||||
|
||||
b := &bundle.Bundle{
|
||||
SyncRootPath: dir,
|
||||
SyncRoot: vfs.MustNew(dir),
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Dashboards: map[string]*resources.Dashboard{
|
||||
"dashboard": {
|
||||
Dashboard: &dashboards.Dashboard{
|
||||
DisplayName: "My Dashboard",
|
||||
},
|
||||
FilePath: "../src/my_dashboard.lvdash.json",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
bundletest.SetLocation(b, "resources.dashboards", []dyn.Location{{
|
||||
File: filepath.Join(dir, "resources/dashboard.yml"),
|
||||
}})
|
||||
|
||||
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
||||
require.NoError(t, diags.Error())
|
||||
|
||||
// Assert that the file path for the dashboard has been converted to its local absolute path.
|
||||
assert.Equal(
|
||||
t,
|
||||
filepath.Join(dir, "src", "my_dashboard.lvdash.json"),
|
||||
b.Config.Resources.Dashboards["dashboard"].FilePath,
|
||||
)
|
||||
}
|
|
@ -21,6 +21,7 @@ type Resources struct {
|
|||
QualityMonitors map[string]*resources.QualityMonitor `json:"quality_monitors,omitempty"`
|
||||
Schemas map[string]*resources.Schema `json:"schemas,omitempty"`
|
||||
Clusters map[string]*resources.Cluster `json:"clusters,omitempty"`
|
||||
Dashboards map[string]*resources.Dashboard `json:"dashboards,omitempty"`
|
||||
}
|
||||
|
||||
type ConfigResource interface {
|
||||
|
@ -77,6 +78,7 @@ func (r *Resources) AllResources() []ResourceGroup {
|
|||
collectResourceMap(descriptions["quality_monitors"], r.QualityMonitors),
|
||||
collectResourceMap(descriptions["schemas"], r.Schemas),
|
||||
collectResourceMap(descriptions["clusters"], r.Clusters),
|
||||
collectResourceMap(descriptions["dashboards"], r.Dashboards),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -175,5 +177,11 @@ func SupportedResources() map[string]ResourceDescription {
|
|||
SingularTitle: "Cluster",
|
||||
PluralTitle: "Clusters",
|
||||
},
|
||||
"dashboards": {
|
||||
SingularName: "dashboard",
|
||||
PluralName: "dashboards",
|
||||
SingularTitle: "Dashboard",
|
||||
PluralTitle: "Dashboards",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,81 @@
package resources

import (
	"context"
	"fmt"
	"net/url"

	"github.com/databricks/cli/libs/log"
	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/marshal"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
)

type Dashboard struct {
	ID             string         `json:"id,omitempty" bundle:"readonly"`
	Permissions    []Permission   `json:"permissions,omitempty"`
	ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
	URL            string         `json:"url,omitempty" bundle:"internal"`

	*dashboards.Dashboard

	// =========================
	// === Additional fields ===
	// =========================

	// SerializedDashboard holds the contents of the dashboard in serialized JSON form.
	// We override the field's type from the SDK struct here to allow for inlining as YAML.
	// If the value is a string, it is used as is.
	// If it is not a string, its contents is marshalled as JSON.
	SerializedDashboard any `json:"serialized_dashboard,omitempty"`

	// EmbedCredentials is a flag to indicate if the publisher's credentials should
	// be embedded in the published dashboard. These embedded credentials will be used
	// to execute the published dashboard's queries.
	//
	// Defaults to false if not set.
	EmbedCredentials bool `json:"embed_credentials,omitempty"`

	// FilePath points to the local `.lvdash.json` file containing the dashboard definition.
	FilePath string `json:"file_path,omitempty"`
}

func (r *Dashboard) UnmarshalJSON(b []byte) error {
	return marshal.Unmarshal(b, r)
}

func (r Dashboard) MarshalJSON() ([]byte, error) {
	return marshal.Marshal(r)
}

func (*Dashboard) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) {
	_, err := w.Lakeview.Get(ctx, dashboards.GetDashboardRequest{
		DashboardId: id,
	})
	if err != nil {
		log.Debugf(ctx, "dashboard %s does not exist", id)
		return false, err
	}
	return true, nil
}

func (*Dashboard) TerraformResourceName() string {
	return "databricks_dashboard"
}

func (r *Dashboard) InitializeURL(baseURL url.URL) {
	if r.ID == "" {
		return
	}

	baseURL.Path = fmt.Sprintf("dashboardsv3/%s/published", r.ID)
	r.URL = baseURL.String()
}

func (r *Dashboard) GetName() string {
	return r.DisplayName
}

func (r *Dashboard) GetURL() string {
	return r.URL
}
@ -1,5 +1,7 @@
|
|||
package resources
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Permission holds the permission level setting for a single principal.
|
||||
// Multiple of these can be defined on any resource.
|
||||
type Permission struct {
|
||||
|
@ -9,3 +11,19 @@ type Permission struct {
|
|||
ServicePrincipalName string `json:"service_principal_name,omitempty"`
|
||||
GroupName string `json:"group_name,omitempty"`
|
||||
}
|
||||
|
||||
func (p Permission) String() string {
|
||||
if p.UserName != "" {
|
||||
return fmt.Sprintf("level: %s, user_name: %s", p.Level, p.UserName)
|
||||
}
|
||||
|
||||
if p.ServicePrincipalName != "" {
|
||||
return fmt.Sprintf("level: %s, service_principal_name: %s", p.Level, p.ServicePrincipalName)
|
||||
}
|
||||
|
||||
if p.GroupName != "" {
|
||||
return fmt.Sprintf("level: %s, group_name: %s", p.Level, p.GroupName)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("level: %s", p.Level)
|
||||
}
|
||||
|
|
|
@ -13,17 +13,15 @@ import (
|
|||
)
|
||||
|
||||
type QualityMonitor struct {
|
||||
// Represents the Input Arguments for Terraform and will get
|
||||
// converted to a HCL representation for CRUD
|
||||
*catalog.CreateMonitor
|
||||
|
||||
// This represents the id which is the full name of the monitor
|
||||
// (catalog_name.schema_name.table_name) that can be used
|
||||
// as a reference in other resources. This value is returned by terraform.
|
||||
ID string `json:"id,omitempty" bundle:"readonly"`
|
||||
|
||||
ID string `json:"id,omitempty" bundle:"readonly"`
|
||||
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
|
||||
URL string `json:"url,omitempty" bundle:"internal"`
|
||||
|
||||
// The table name is a required field but not included as a JSON field in [catalog.CreateMonitor].
|
||||
TableName string `json:"table_name"`
|
||||
|
||||
// This struct defines the creation payload for a monitor.
|
||||
*catalog.CreateMonitor
|
||||
}
|
||||
|
||||
func (s *QualityMonitor) UnmarshalJSON(b []byte) error {
|
||||
|
|
|
@ -0,0 +1,106 @@
|
|||
package validate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/libraries"
|
||||
"github.com/databricks/cli/bundle/paths"
|
||||
"github.com/databricks/cli/bundle/permissions"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/databricks-sdk-go/apierr"
|
||||
"github.com/databricks/databricks-sdk-go/service/workspace"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
type folderPermissions struct {
|
||||
}
|
||||
|
||||
// Apply implements bundle.ReadOnlyMutator.
|
||||
func (f *folderPermissions) Apply(ctx context.Context, b bundle.ReadOnlyBundle) diag.Diagnostics {
|
||||
if len(b.Config().Permissions) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
bundlePaths := paths.CollectUniqueWorkspacePathPrefixes(b.Config().Workspace)
|
||||
|
||||
var diags diag.Diagnostics
|
||||
g, ctx := errgroup.WithContext(ctx)
|
||||
results := make([]diag.Diagnostics, len(bundlePaths))
|
||||
for i, p := range bundlePaths {
|
||||
g.Go(func() error {
|
||||
results[i] = checkFolderPermission(ctx, b, p)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := g.Wait(); err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
for _, r := range results {
|
||||
diags = diags.Extend(r)
|
||||
}
|
||||
|
||||
return diags
|
||||
}
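Each goroutine above writes only to its own index of the results slice, so no mutex is needed. A minimal standalone sketch of the same fan-out pattern (the check function is a stand-in, not part of this commit):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// check stands in for the per-path permission check.
func check(_ context.Context, p string) string {
	return "checked " + p
}

func main() {
	paths := []string{"/a", "/b", "/c"}
	results := make([]string, len(paths))

	g, ctx := errgroup.WithContext(context.Background())
	for i, p := range paths {
		i, p := i, p // pin loop variables (only needed before Go 1.22)
		g.Go(func() error {
			// Each goroutine writes only to its own slot, so no locking is required.
			results[i] = check(ctx, p)
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		panic(err)
	}
	fmt.Println(results)
}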
|
||||
|
||||
func checkFolderPermission(ctx context.Context, b bundle.ReadOnlyBundle, folderPath string) diag.Diagnostics {
|
||||
// If the folder is shared, we don't need to check permissions here; they were already checked by an earlier mutator.
|
||||
if libraries.IsWorkspaceSharedPath(folderPath) {
|
||||
return nil
|
||||
}
|
||||
|
||||
w := b.WorkspaceClient().Workspace
|
||||
obj, err := getClosestExistingObject(ctx, w, folderPath)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
objPermissions, err := w.GetPermissions(ctx, workspace.GetWorkspaceObjectPermissionsRequest{
|
||||
WorkspaceObjectId: fmt.Sprint(obj.ObjectId),
|
||||
WorkspaceObjectType: "directories",
|
||||
})
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
p := permissions.ObjectAclToResourcePermissions(folderPath, objPermissions.AccessControlList)
|
||||
return p.Compare(b.Config().Permissions)
|
||||
}
|
||||
|
||||
func getClosestExistingObject(ctx context.Context, w workspace.WorkspaceInterface, folderPath string) (*workspace.ObjectInfo, error) {
|
||||
for {
|
||||
obj, err := w.GetStatusByPath(ctx, folderPath)
|
||||
if err == nil {
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
if !apierr.IsMissing(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
parent := path.Dir(folderPath)
|
||||
// If the parent is the same as the current folder, then we have reached the root
|
||||
if folderPath == parent {
|
||||
break
|
||||
}
|
||||
|
||||
folderPath = parent
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("folder %s and its parent folders do not exist", folderPath)
|
||||
}
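The loop above terminates because path.Dir eventually becomes a fixed point at the root. A quick illustration (not part of this commit):

package main

import (
	"fmt"
	"path"
)

func main() {
	p := "/Workspace/Users/foo@bar.com/artifacts"
	for {
		parent := path.Dir(p)
		fmt.Printf("%s -> %s\n", p, parent)
		if parent == p {
			// path.Dir("/") == "/", so the walk is guaranteed to stop here.
			break
		}
		p = parent
	}
}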
|
||||
|
||||
// Name implements bundle.ReadOnlyMutator.
|
||||
func (f *folderPermissions) Name() string {
|
||||
return "validate:folder_permissions"
|
||||
}
|
||||
|
||||
// ValidateFolderPermissions validates that the permissions for folders in the workspace file system match
|
||||
// the permissions in the top-level permissions section of the bundle.
|
||||
func ValidateFolderPermissions() bundle.ReadOnlyMutator {
|
||||
return &folderPermissions{}
|
||||
}
|
|
@ -0,0 +1,208 @@
|
|||
package validate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/databricks/cli/bundle/permissions"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/databricks-sdk-go/apierr"
|
||||
"github.com/databricks/databricks-sdk-go/experimental/mocks"
|
||||
"github.com/databricks/databricks-sdk-go/service/workspace"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestFolderPermissionsInheritedWhenRootPathDoesNotExist(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
RootPath: "/Workspace/Users/foo@bar.com",
|
||||
ArtifactPath: "/Workspace/Users/otherfoo@bar.com/artifacts",
|
||||
FilePath: "/Workspace/Users/foo@bar.com/files",
|
||||
StatePath: "/Workspace/Users/foo@bar.com/state",
|
||||
ResourcePath: "/Workspace/Users/foo@bar.com/resources",
|
||||
},
|
||||
Permissions: []resources.Permission{
|
||||
{Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"},
|
||||
},
|
||||
},
|
||||
}
|
||||
m := mocks.NewMockWorkspaceClient(t)
|
||||
api := m.GetMockWorkspaceAPI()
|
||||
api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/otherfoo@bar.com/artifacts").Return(nil, &apierr.APIError{
|
||||
StatusCode: 404,
|
||||
ErrorCode: "RESOURCE_DOES_NOT_EXIST",
|
||||
})
|
||||
api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/otherfoo@bar.com").Return(nil, &apierr.APIError{
|
||||
StatusCode: 404,
|
||||
ErrorCode: "RESOURCE_DOES_NOT_EXIST",
|
||||
})
|
||||
api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/foo@bar.com").Return(nil, &apierr.APIError{
|
||||
StatusCode: 404,
|
||||
ErrorCode: "RESOURCE_DOES_NOT_EXIST",
|
||||
})
|
||||
api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users").Return(nil, &apierr.APIError{
|
||||
StatusCode: 404,
|
||||
ErrorCode: "RESOURCE_DOES_NOT_EXIST",
|
||||
})
|
||||
api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace").Return(&workspace.ObjectInfo{
|
||||
ObjectId: 1234,
|
||||
}, nil)
|
||||
|
||||
api.EXPECT().GetPermissions(mock.Anything, workspace.GetWorkspaceObjectPermissionsRequest{
|
||||
WorkspaceObjectId: "1234",
|
||||
WorkspaceObjectType: "directories",
|
||||
}).Return(&workspace.WorkspaceObjectPermissions{
|
||||
ObjectId: "1234",
|
||||
AccessControlList: []workspace.WorkspaceObjectAccessControlResponse{
|
||||
{
|
||||
UserName: "foo@bar.com",
|
||||
AllPermissions: []workspace.WorkspaceObjectPermission{
|
||||
{PermissionLevel: "CAN_MANAGE"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, nil)
|
||||
|
||||
b.SetWorkpaceClient(m.WorkspaceClient)
|
||||
rb := bundle.ReadOnly(b)
|
||||
|
||||
diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions())
|
||||
require.Empty(t, diags)
|
||||
}
|
||||
|
||||
func TestValidateFolderPermissionsFailsOnMissingBundlePermission(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
RootPath: "/Workspace/Users/foo@bar.com",
|
||||
ArtifactPath: "/Workspace/Users/foo@bar.com/artifacts",
|
||||
FilePath: "/Workspace/Users/foo@bar.com/files",
|
||||
StatePath: "/Workspace/Users/foo@bar.com/state",
|
||||
ResourcePath: "/Workspace/Users/foo@bar.com/resources",
|
||||
},
|
||||
Permissions: []resources.Permission{
|
||||
{Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"},
|
||||
},
|
||||
},
|
||||
}
|
||||
m := mocks.NewMockWorkspaceClient(t)
|
||||
api := m.GetMockWorkspaceAPI()
|
||||
api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/foo@bar.com").Return(&workspace.ObjectInfo{
|
||||
ObjectId: 1234,
|
||||
}, nil)
|
||||
|
||||
api.EXPECT().GetPermissions(mock.Anything, workspace.GetWorkspaceObjectPermissionsRequest{
|
||||
WorkspaceObjectId: "1234",
|
||||
WorkspaceObjectType: "directories",
|
||||
}).Return(&workspace.WorkspaceObjectPermissions{
|
||||
ObjectId: "1234",
|
||||
AccessControlList: []workspace.WorkspaceObjectAccessControlResponse{
|
||||
{
|
||||
UserName: "foo@bar.com",
|
||||
AllPermissions: []workspace.WorkspaceObjectPermission{
|
||||
{PermissionLevel: "CAN_MANAGE"},
|
||||
},
|
||||
},
|
||||
{
|
||||
UserName: "foo2@bar.com",
|
||||
AllPermissions: []workspace.WorkspaceObjectPermission{
|
||||
{PermissionLevel: "CAN_MANAGE"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, nil)
|
||||
|
||||
b.SetWorkpaceClient(m.WorkspaceClient)
|
||||
rb := bundle.ReadOnly(b)
|
||||
|
||||
diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions())
|
||||
require.Len(t, diags, 1)
|
||||
require.Equal(t, "untracked permissions apply to target workspace path", diags[0].Summary)
|
||||
require.Equal(t, diag.Warning, diags[0].Severity)
|
||||
require.Equal(t, "The following permissions apply to the workspace folder at \"/Workspace/Users/foo@bar.com\" but are not configured in the bundle:\n- level: CAN_MANAGE, user_name: foo2@bar.com\n", diags[0].Detail)
|
||||
}
|
||||
|
||||
func TestValidateFolderPermissionsFailsOnPermissionMismatch(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
RootPath: "/Workspace/Users/foo@bar.com",
|
||||
ArtifactPath: "/Workspace/Users/foo@bar.com/artifacts",
|
||||
FilePath: "/Workspace/Users/foo@bar.com/files",
|
||||
StatePath: "/Workspace/Users/foo@bar.com/state",
|
||||
ResourcePath: "/Workspace/Users/foo@bar.com/resources",
|
||||
},
|
||||
Permissions: []resources.Permission{
|
||||
{Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"},
|
||||
},
|
||||
},
|
||||
}
|
||||
m := mocks.NewMockWorkspaceClient(t)
|
||||
api := m.GetMockWorkspaceAPI()
|
||||
api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/foo@bar.com").Return(&workspace.ObjectInfo{
|
||||
ObjectId: 1234,
|
||||
}, nil)
|
||||
|
||||
api.EXPECT().GetPermissions(mock.Anything, workspace.GetWorkspaceObjectPermissionsRequest{
|
||||
WorkspaceObjectId: "1234",
|
||||
WorkspaceObjectType: "directories",
|
||||
}).Return(&workspace.WorkspaceObjectPermissions{
|
||||
ObjectId: "1234",
|
||||
AccessControlList: []workspace.WorkspaceObjectAccessControlResponse{
|
||||
{
|
||||
UserName: "foo2@bar.com",
|
||||
AllPermissions: []workspace.WorkspaceObjectPermission{
|
||||
{PermissionLevel: "CAN_MANAGE"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, nil)
|
||||
|
||||
b.SetWorkpaceClient(m.WorkspaceClient)
|
||||
rb := bundle.ReadOnly(b)
|
||||
|
||||
diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions())
|
||||
require.Len(t, diags, 1)
|
||||
require.Equal(t, "untracked permissions apply to target workspace path", diags[0].Summary)
|
||||
require.Equal(t, diag.Warning, diags[0].Severity)
|
||||
}
|
||||
|
||||
func TestValidateFolderPermissionsFailsOnNoRootFolder(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
RootPath: "/NotExisting",
|
||||
ArtifactPath: "/NotExisting/artifacts",
|
||||
FilePath: "/NotExisting/files",
|
||||
StatePath: "/NotExisting/state",
|
||||
ResourcePath: "/NotExisting/resources",
|
||||
},
|
||||
Permissions: []resources.Permission{
|
||||
{Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"},
|
||||
},
|
||||
},
|
||||
}
|
||||
m := mocks.NewMockWorkspaceClient(t)
|
||||
api := m.GetMockWorkspaceAPI()
|
||||
api.EXPECT().GetStatusByPath(mock.Anything, "/NotExisting").Return(nil, &apierr.APIError{
|
||||
StatusCode: 404,
|
||||
ErrorCode: "RESOURCE_DOES_NOT_EXIST",
|
||||
})
|
||||
api.EXPECT().GetStatusByPath(mock.Anything, "/").Return(nil, &apierr.APIError{
|
||||
StatusCode: 404,
|
||||
ErrorCode: "RESOURCE_DOES_NOT_EXIST",
|
||||
})
|
||||
|
||||
b.SetWorkpaceClient(m.WorkspaceClient)
|
||||
rb := bundle.ReadOnly(b)
|
||||
|
||||
diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions())
|
||||
require.Len(t, diags, 1)
|
||||
require.Equal(t, "folder / and its parent folders do not exist", diags[0].Summary)
|
||||
require.Equal(t, diag.Error, diags[0].Severity)
|
||||
}
|
|
@ -35,6 +35,7 @@ func (v *validate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
|
|||
FilesToSync(),
|
||||
ValidateSyncPatterns(),
|
||||
JobTaskClusterSpec(),
|
||||
ValidateFolderPermissions(),
|
||||
))
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,117 @@
|
|||
package terraform
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/cli/libs/dyn"
|
||||
tfjson "github.com/hashicorp/terraform-json"
|
||||
)
|
||||
|
||||
type dashboardState struct {
|
||||
Name string
|
||||
ID string
|
||||
ETag string
|
||||
}
|
||||
|
||||
func collectDashboardsFromState(ctx context.Context, b *bundle.Bundle) ([]dashboardState, error) {
|
||||
state, err := ParseResourcesState(ctx, b)
|
||||
if err != nil && state == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var dashboards []dashboardState
|
||||
for _, resource := range state.Resources {
|
||||
if resource.Mode != tfjson.ManagedResourceMode {
|
||||
continue
|
||||
}
|
||||
for _, instance := range resource.Instances {
|
||||
switch resource.Type {
|
||||
case "databricks_dashboard":
|
||||
dashboards = append(dashboards, dashboardState{
|
||||
Name: resource.Name,
|
||||
ID: instance.Attributes.ID,
|
||||
ETag: instance.Attributes.ETag,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return dashboards, nil
|
||||
}
|
||||
|
||||
type checkDashboardsModifiedRemotely struct {
|
||||
}
|
||||
|
||||
func (l *checkDashboardsModifiedRemotely) Name() string {
|
||||
return "CheckDashboardsModifiedRemotely"
|
||||
}
|
||||
|
||||
func (l *checkDashboardsModifiedRemotely) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
// This mutator is relevant only if the bundle includes dashboards.
|
||||
if len(b.Config.Resources.Dashboards) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If the user has forced the deployment, skip this check.
|
||||
if b.Config.Bundle.Force {
|
||||
return nil
|
||||
}
|
||||
|
||||
dashboards, err := collectDashboardsFromState(ctx, b)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
var diags diag.Diagnostics
|
||||
for _, dashboard := range dashboards {
|
||||
// Skip dashboards that are not defined in the bundle.
|
||||
// These will be destroyed upon deployment.
|
||||
if _, ok := b.Config.Resources.Dashboards[dashboard.Name]; !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
path := dyn.MustPathFromString(fmt.Sprintf("resources.dashboards.%s", dashboard.Name))
|
||||
loc := b.Config.GetLocation(path.String())
|
||||
actual, err := b.WorkspaceClient().Lakeview.GetByDashboardId(ctx, dashboard.ID)
|
||||
if err != nil {
|
||||
diags = diags.Append(diag.Diagnostic{
|
||||
Severity: diag.Error,
|
||||
Summary: fmt.Sprintf("failed to get dashboard %q", dashboard.Name),
|
||||
Detail: err.Error(),
|
||||
Paths: []dyn.Path{path},
|
||||
Locations: []dyn.Location{loc},
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
// If the ETag is the same, the dashboard has not been modified.
|
||||
if actual.Etag == dashboard.ETag {
|
||||
continue
|
||||
}
|
||||
|
||||
diags = diags.Append(diag.Diagnostic{
|
||||
Severity: diag.Error,
|
||||
Summary: fmt.Sprintf("dashboard %q has been modified remotely", dashboard.Name),
|
||||
Detail: "" +
|
||||
"This dashboard has been modified remotely since the last bundle deployment.\n" +
|
||||
"These modifications are untracked and will be overwritten on deploy.\n" +
|
||||
"\n" +
|
||||
"Make sure that the local dashboard definition matches what you intend to deploy\n" +
|
||||
"before proceeding with the deployment.\n" +
|
||||
"\n" +
|
||||
"Run `databricks bundle deploy --force` to bypass this error." +
|
||||
"",
|
||||
Paths: []dyn.Path{path},
|
||||
Locations: []dyn.Location{loc},
|
||||
})
|
||||
}
|
||||
|
||||
return diags
|
||||
}
|
||||
|
||||
func CheckDashboardsModifiedRemotely() *checkDashboardsModifiedRemotely {
|
||||
return &checkDashboardsModifiedRemotely{}
|
||||
}
|
|
@ -0,0 +1,191 @@
|
|||
package terraform
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/databricks/cli/internal/testutil"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/databricks-sdk-go/experimental/mocks"
|
||||
"github.com/databricks/databricks-sdk-go/service/dashboards"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func mockDashboardBundle(t *testing.T) *bundle.Bundle {
|
||||
dir := t.TempDir()
|
||||
b := &bundle.Bundle{
|
||||
BundleRootPath: dir,
|
||||
Config: config.Root{
|
||||
Bundle: config.Bundle{
|
||||
Target: "test",
|
||||
},
|
||||
Resources: config.Resources{
|
||||
Dashboards: map[string]*resources.Dashboard{
|
||||
"dash1": {
|
||||
Dashboard: &dashboards.Dashboard{
|
||||
DisplayName: "My Special Dashboard",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func TestCheckDashboardsModifiedRemotely_NoDashboards(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
b := &bundle.Bundle{
|
||||
BundleRootPath: dir,
|
||||
Config: config.Root{
|
||||
Bundle: config.Bundle{
|
||||
Target: "test",
|
||||
},
|
||||
Resources: config.Resources{},
|
||||
},
|
||||
}
|
||||
|
||||
diags := bundle.Apply(context.Background(), b, CheckDashboardsModifiedRemotely())
|
||||
assert.Empty(t, diags)
|
||||
}
|
||||
|
||||
func TestCheckDashboardsModifiedRemotely_FirstDeployment(t *testing.T) {
|
||||
b := mockDashboardBundle(t)
|
||||
diags := bundle.Apply(context.Background(), b, CheckDashboardsModifiedRemotely())
|
||||
assert.Empty(t, diags)
|
||||
}
|
||||
|
||||
func TestCheckDashboardsModifiedRemotely_ExistingStateNoChange(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
b := mockDashboardBundle(t)
|
||||
writeFakeDashboardState(t, ctx, b)
|
||||
|
||||
// Mock the call to the API.
|
||||
m := mocks.NewMockWorkspaceClient(t)
|
||||
dashboardsAPI := m.GetMockLakeviewAPI()
|
||||
dashboardsAPI.EXPECT().
|
||||
GetByDashboardId(mock.Anything, "id1").
|
||||
Return(&dashboards.Dashboard{
|
||||
DisplayName: "My Special Dashboard",
|
||||
Etag: "1000",
|
||||
}, nil).
|
||||
Once()
|
||||
b.SetWorkpaceClient(m.WorkspaceClient)
|
||||
|
||||
// No changes, so no diags.
|
||||
diags := bundle.Apply(ctx, b, CheckDashboardsModifiedRemotely())
|
||||
assert.Empty(t, diags)
|
||||
}
|
||||
|
||||
func TestCheckDashboardsModifiedRemotely_ExistingStateChange(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
b := mockDashboardBundle(t)
|
||||
writeFakeDashboardState(t, ctx, b)
|
||||
|
||||
// Mock the call to the API.
|
||||
m := mocks.NewMockWorkspaceClient(t)
|
||||
dashboardsAPI := m.GetMockLakeviewAPI()
|
||||
dashboardsAPI.EXPECT().
|
||||
GetByDashboardId(mock.Anything, "id1").
|
||||
Return(&dashboards.Dashboard{
|
||||
DisplayName: "My Special Dashboard",
|
||||
Etag: "1234",
|
||||
}, nil).
|
||||
Once()
|
||||
b.SetWorkpaceClient(m.WorkspaceClient)
|
||||
|
||||
// The dashboard has changed, so expect an error.
|
||||
diags := bundle.Apply(ctx, b, CheckDashboardsModifiedRemotely())
|
||||
if assert.Len(t, diags, 1) {
|
||||
assert.Equal(t, diag.Error, diags[0].Severity)
|
||||
assert.Equal(t, `dashboard "dash1" has been modified remotely`, diags[0].Summary)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckDashboardsModifiedRemotely_ExistingStateFailureToGet(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
b := mockDashboardBundle(t)
|
||||
writeFakeDashboardState(t, ctx, b)
|
||||
|
||||
// Mock the call to the API.
|
||||
m := mocks.NewMockWorkspaceClient(t)
|
||||
dashboardsAPI := m.GetMockLakeviewAPI()
|
||||
dashboardsAPI.EXPECT().
|
||||
GetByDashboardId(mock.Anything, "id1").
|
||||
Return(nil, fmt.Errorf("failure")).
|
||||
Once()
|
||||
b.SetWorkpaceClient(m.WorkspaceClient)
|
||||
|
||||
// Unable to get the dashboard, so expect an error.
|
||||
diags := bundle.Apply(ctx, b, CheckDashboardsModifiedRemotely())
|
||||
if assert.Len(t, diags, 1) {
|
||||
assert.Equal(t, diag.Error, diags[0].Severity)
|
||||
assert.Equal(t, `failed to get dashboard "dash1"`, diags[0].Summary)
|
||||
}
|
||||
}
|
||||
|
||||
func writeFakeDashboardState(t *testing.T, ctx context.Context, b *bundle.Bundle) {
|
||||
tfDir, err := Dir(ctx, b)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Write fake state file.
|
||||
testutil.WriteFile(t, `
|
||||
{
|
||||
"version": 4,
|
||||
"terraform_version": "1.5.5",
|
||||
"resources": [
|
||||
{
|
||||
"mode": "managed",
|
||||
"type": "databricks_dashboard",
|
||||
"name": "dash1",
|
||||
"instances": [
|
||||
{
|
||||
"schema_version": 0,
|
||||
"attributes": {
|
||||
"etag": "1000",
|
||||
"id": "id1"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"mode": "managed",
|
||||
"type": "databricks_job",
|
||||
"name": "job",
|
||||
"instances": [
|
||||
{
|
||||
"schema_version": 0,
|
||||
"attributes": {
|
||||
"id": "1234"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"mode": "managed",
|
||||
"type": "databricks_dashboard",
|
||||
"name": "dash2",
|
||||
"instances": [
|
||||
{
|
||||
"schema_version": 0,
|
||||
"attributes": {
|
||||
"etag": "1001",
|
||||
"id": "id2"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
`, filepath.Join(tfDir, TerraformStateFileName))
|
||||
}
|
|
@ -176,6 +176,16 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error {
|
|||
}
|
||||
cur.ID = instance.Attributes.ID
|
||||
config.Resources.Clusters[resource.Name] = cur
|
||||
case "databricks_dashboard":
|
||||
if config.Resources.Dashboards == nil {
|
||||
config.Resources.Dashboards = make(map[string]*resources.Dashboard)
|
||||
}
|
||||
cur := config.Resources.Dashboards[resource.Name]
|
||||
if cur == nil {
|
||||
cur = &resources.Dashboard{ModifiedStatus: resources.ModifiedStatusDeleted}
|
||||
}
|
||||
cur.ID = instance.Attributes.ID
|
||||
config.Resources.Dashboards[resource.Name] = cur
|
||||
case "databricks_permissions":
|
||||
case "databricks_grants":
|
||||
// Ignore; no need to pull these back into the configuration.
|
||||
|
@ -230,6 +240,11 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error {
|
|||
src.ModifiedStatus = resources.ModifiedStatusCreated
|
||||
}
|
||||
}
|
||||
for _, src := range config.Resources.Dashboards {
|
||||
if src.ModifiedStatus == "" && src.ID == "" {
|
||||
src.ModifiedStatus = resources.ModifiedStatusCreated
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -12,6 +12,7 @@ import (
|
|||
"github.com/databricks/cli/libs/dyn/convert"
|
||||
"github.com/databricks/databricks-sdk-go/service/catalog"
|
||||
"github.com/databricks/databricks-sdk-go/service/compute"
|
||||
"github.com/databricks/databricks-sdk-go/service/dashboards"
|
||||
"github.com/databricks/databricks-sdk-go/service/jobs"
|
||||
"github.com/databricks/databricks-sdk-go/service/ml"
|
||||
"github.com/databricks/databricks-sdk-go/service/pipelines"
|
||||
|
@ -677,6 +678,14 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) {
|
|||
{Attributes: stateInstanceAttributes{ID: "1"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: "databricks_dashboard",
|
||||
Mode: "managed",
|
||||
Name: "test_dashboard",
|
||||
Instances: []stateResourceInstance{
|
||||
{Attributes: stateInstanceAttributes{ID: "1"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err := TerraformToBundle(&tfState, &config)
|
||||
|
@ -709,6 +718,9 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) {
|
|||
assert.Equal(t, "1", config.Resources.Clusters["test_cluster"].ID)
|
||||
assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Clusters["test_cluster"].ModifiedStatus)
|
||||
|
||||
assert.Equal(t, "1", config.Resources.Dashboards["test_dashboard"].ID)
|
||||
assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Dashboards["test_dashboard"].ModifiedStatus)
|
||||
|
||||
AssertFullResourceCoverage(t, &config)
|
||||
}
|
||||
|
||||
|
@ -778,6 +790,13 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
Dashboards: map[string]*resources.Dashboard{
|
||||
"test_dashboard": {
|
||||
Dashboard: &dashboards.Dashboard{
|
||||
DisplayName: "test_dashboard",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
var tfState = resourcesState{
|
||||
|
@ -813,6 +832,9 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
|
|||
assert.Equal(t, "", config.Resources.Clusters["test_cluster"].ID)
|
||||
assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Clusters["test_cluster"].ModifiedStatus)
|
||||
|
||||
assert.Equal(t, "", config.Resources.Dashboards["test_dashboard"].ID)
|
||||
assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Dashboards["test_dashboard"].ModifiedStatus)
|
||||
|
||||
AssertFullResourceCoverage(t, &config)
|
||||
}
|
||||
|
||||
|
@ -927,6 +949,18 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
Dashboards: map[string]*resources.Dashboard{
|
||||
"test_dashboard": {
|
||||
Dashboard: &dashboards.Dashboard{
|
||||
DisplayName: "test_dashboard",
|
||||
},
|
||||
},
|
||||
"test_dashboard_new": {
|
||||
Dashboard: &dashboards.Dashboard{
|
||||
DisplayName: "test_dashboard_new",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
var tfState = resourcesState{
|
||||
|
@ -1075,6 +1109,22 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
|
|||
{Attributes: stateInstanceAttributes{ID: "2"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: "databricks_dashboard",
|
||||
Mode: "managed",
|
||||
Name: "test_dashboard",
|
||||
Instances: []stateResourceInstance{
|
||||
{Attributes: stateInstanceAttributes{ID: "1"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: "databricks_dashboard",
|
||||
Mode: "managed",
|
||||
Name: "test_dashboard_old",
|
||||
Instances: []stateResourceInstance{
|
||||
{Attributes: stateInstanceAttributes{ID: "2"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err := TerraformToBundle(&tfState, &config)
|
||||
|
@ -1143,6 +1193,13 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
|
|||
assert.Equal(t, "", config.Resources.Clusters["test_cluster_new"].ID)
|
||||
assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Clusters["test_cluster_new"].ModifiedStatus)
|
||||
|
||||
assert.Equal(t, "1", config.Resources.Dashboards["test_dashboard"].ID)
|
||||
assert.Equal(t, "", config.Resources.Dashboards["test_dashboard"].ModifiedStatus)
|
||||
assert.Equal(t, "2", config.Resources.Dashboards["test_dashboard_old"].ID)
|
||||
assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Dashboards["test_dashboard_old"].ModifiedStatus)
|
||||
assert.Equal(t, "", config.Resources.Dashboards["test_dashboard_new"].ID)
|
||||
assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Dashboards["test_dashboard_new"].ModifiedStatus)
|
||||
|
||||
AssertFullResourceCoverage(t, &config)
|
||||
}
|
||||
|
||||
|
|
|
@ -60,6 +60,8 @@ func (m *interpolateMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.D
|
|||
path = dyn.NewPath(dyn.Key("databricks_schema")).Append(path[2:]...)
|
||||
case dyn.Key("clusters"):
|
||||
path = dyn.NewPath(dyn.Key("databricks_cluster")).Append(path[2:]...)
|
||||
case dyn.Key("dashboards"):
|
||||
path = dyn.NewPath(dyn.Key("databricks_dashboard")).Append(path[2:]...)
|
||||
default:
|
||||
// Trigger "key not found" for unknown resource types.
|
||||
return dyn.GetByPath(root, path)
|
||||
|
|
|
@ -32,6 +32,7 @@ func TestInterpolate(t *testing.T) {
|
|||
"other_registered_model": "${resources.registered_models.other_registered_model.id}",
|
||||
"other_schema": "${resources.schemas.other_schema.id}",
|
||||
"other_cluster": "${resources.clusters.other_cluster.id}",
|
||||
"other_dashboard": "${resources.dashboards.other_dashboard.id}",
|
||||
},
|
||||
Tasks: []jobs.Task{
|
||||
{
|
||||
|
@ -69,6 +70,7 @@ func TestInterpolate(t *testing.T) {
|
|||
assert.Equal(t, "${databricks_registered_model.other_registered_model.id}", j.Tags["other_registered_model"])
|
||||
assert.Equal(t, "${databricks_schema.other_schema.id}", j.Tags["other_schema"])
|
||||
assert.Equal(t, "${databricks_cluster.other_cluster.id}", j.Tags["other_cluster"])
|
||||
assert.Equal(t, "${databricks_dashboard.other_dashboard.id}", j.Tags["other_dashboard"])
|
||||
|
||||
m := b.Config.Resources.Models["my_model"]
|
||||
assert.Equal(t, "my_model", m.Model.Name)
|
||||
|
|
|
@ -0,0 +1,109 @@
|
|||
package tfdyn
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/databricks/cli/bundle/internal/tf/schema"
|
||||
"github.com/databricks/cli/libs/dyn"
|
||||
"github.com/databricks/cli/libs/dyn/convert"
|
||||
"github.com/databricks/cli/libs/log"
|
||||
)
|
||||
|
||||
const (
|
||||
filePathFieldName = "file_path"
|
||||
serializedDashboardFieldName = "serialized_dashboard"
|
||||
)
|
||||
|
||||
// Marshal "serialized_dashboard" as JSON if it is set in the input but not in the output.
|
||||
func marshalSerializedDashboard(vin dyn.Value, vout dyn.Value) (dyn.Value, error) {
|
||||
// Skip if the "serialized_dashboard" field is already set.
|
||||
if v := vout.Get(serializedDashboardFieldName); v.IsValid() {
|
||||
return vout, nil
|
||||
}
|
||||
|
||||
// Skip if the "serialized_dashboard" field on the input is not set.
|
||||
v := vin.Get(serializedDashboardFieldName)
|
||||
if !v.IsValid() {
|
||||
return vout, nil
|
||||
}
|
||||
|
||||
// Marshal the "serialized_dashboard" field as JSON.
|
||||
data, err := json.Marshal(v.AsAny())
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, fmt.Errorf("failed to marshal serialized_dashboard: %w", err)
|
||||
}
|
||||
|
||||
// Set the "serialized_dashboard" field on the output.
|
||||
return dyn.Set(vout, serializedDashboardFieldName, dyn.V(string(data)))
|
||||
}
|
||||
|
||||
func convertDashboardResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) {
|
||||
var err error
|
||||
|
||||
// Normalize the output value to the target schema.
|
||||
vout, diags := convert.Normalize(schema.ResourceDashboard{}, vin)
|
||||
for _, diag := range diags {
|
||||
log.Debugf(ctx, "dashboard normalization diagnostic: %s", diag.Summary)
|
||||
}
|
||||
|
||||
// Include "serialized_dashboard" field if "file_path" is set.
|
||||
// Note: the Terraform resource supports "file_path" natively, but its
|
||||
// change detection mechanism doesn't work as expected at the time of writing (Sep 30).
|
||||
if path, ok := vout.Get(filePathFieldName).AsString(); ok {
|
||||
vout, err = dyn.Set(vout, serializedDashboardFieldName, dyn.V(fmt.Sprintf("${file(%q)}", path)))
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, fmt.Errorf("failed to set serialized_dashboard: %w", err)
|
||||
}
|
||||
// Drop the "file_path" field. It is mutually exclusive with "serialized_dashboard".
|
||||
vout, err = dyn.Walk(vout, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
|
||||
switch len(p) {
|
||||
case 0:
|
||||
return v, nil
|
||||
case 1:
|
||||
if p[0] == dyn.Key(filePathFieldName) {
|
||||
return v, dyn.ErrDrop
|
||||
}
|
||||
}
|
||||
|
||||
// Skip everything else.
|
||||
return v, dyn.ErrSkip
|
||||
})
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, fmt.Errorf("failed to drop file_path: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Marshal "serialized_dashboard" as JSON if it is set in the input but not in the output.
|
||||
vout, err = marshalSerializedDashboard(vin, vout)
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, err
|
||||
}
|
||||
|
||||
return vout, nil
|
||||
}
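The %q verb used for the file() interpolation above quotes the path and escapes any backslashes, which is why Windows paths show up with doubled backslashes in the converter tests later in this commit. A minimal illustration (not part of this commit):

package main

import "fmt"

func main() {
	// Relative paths are quoted verbatim.
	fmt.Println(fmt.Sprintf("${file(%q)}", "some/path"))
	// Output: ${file("some/path")}

	// Backslashes in Windows paths are escaped by %q.
	fmt.Println(fmt.Sprintf("${file(%q)}", `C:\foo\bar\baz\dashboard.lvdash.json`))
	// Output: ${file("C:\\foo\\bar\\baz\\dashboard.lvdash.json")}
}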
|
||||
|
||||
type dashboardConverter struct{}
|
||||
|
||||
func (dashboardConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error {
|
||||
vout, err := convertDashboardResource(ctx, vin)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add the converted resource to the output.
|
||||
out.Dashboard[key] = vout.AsAny()
|
||||
|
||||
// Configure permissions for this resource.
|
||||
if permissions := convertPermissionsResource(ctx, vin); permissions != nil {
|
||||
permissions.DashboardId = fmt.Sprintf("${databricks_dashboard.%s.id}", key)
|
||||
out.Permissions["dashboard_"+key] = permissions
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
registerConverter("dashboards", dashboardConverter{})
|
||||
}
|
|
@ -0,0 +1,153 @@
|
|||
package tfdyn
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/databricks/cli/bundle/internal/tf/schema"
|
||||
"github.com/databricks/cli/libs/dyn"
|
||||
"github.com/databricks/cli/libs/dyn/convert"
|
||||
"github.com/databricks/databricks-sdk-go/service/dashboards"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestConvertDashboard(t *testing.T) {
|
||||
var src = resources.Dashboard{
|
||||
Dashboard: &dashboards.Dashboard{
|
||||
DisplayName: "my dashboard",
|
||||
WarehouseId: "f00dcafe",
|
||||
ParentPath: "/some/path",
|
||||
},
|
||||
|
||||
EmbedCredentials: true,
|
||||
|
||||
Permissions: []resources.Permission{
|
||||
{
|
||||
Level: "CAN_VIEW",
|
||||
UserName: "jane@doe.com",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
vin, err := convert.FromTyped(src, dyn.NilValue)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
out := schema.NewResources()
|
||||
err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assert equality on the job
|
||||
assert.Equal(t, map[string]any{
|
||||
"display_name": "my dashboard",
|
||||
"warehouse_id": "f00dcafe",
|
||||
"parent_path": "/some/path",
|
||||
"embed_credentials": true,
|
||||
}, out.Dashboard["my_dashboard"])
|
||||
|
||||
// Assert equality on the permissions
|
||||
assert.Equal(t, &schema.ResourcePermissions{
|
||||
DashboardId: "${databricks_dashboard.my_dashboard.id}",
|
||||
AccessControl: []schema.ResourcePermissionsAccessControl{
|
||||
{
|
||||
PermissionLevel: "CAN_VIEW",
|
||||
UserName: "jane@doe.com",
|
||||
},
|
||||
},
|
||||
}, out.Permissions["dashboard_my_dashboard"])
|
||||
}
|
||||
|
||||
func TestConvertDashboardFilePath(t *testing.T) {
|
||||
var src = resources.Dashboard{
|
||||
FilePath: "some/path",
|
||||
}
|
||||
|
||||
vin, err := convert.FromTyped(src, dyn.NilValue)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
out := schema.NewResources()
|
||||
err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assert that the "serialized_dashboard" is included.
|
||||
assert.Subset(t, out.Dashboard["my_dashboard"], map[string]any{
|
||||
"serialized_dashboard": "${file(\"some/path\")}",
|
||||
})
|
||||
|
||||
// Assert that the "file_path" doesn't carry over.
|
||||
assert.NotSubset(t, out.Dashboard["my_dashboard"], map[string]any{
|
||||
"file_path": "some/path",
|
||||
})
|
||||
}
|
||||
|
||||
func TestConvertDashboardFilePathQuoted(t *testing.T) {
|
||||
var src = resources.Dashboard{
|
||||
FilePath: `C:\foo\bar\baz\dashboard.lvdash.json`,
|
||||
}
|
||||
|
||||
vin, err := convert.FromTyped(src, dyn.NilValue)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
out := schema.NewResources()
|
||||
err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assert that the "serialized_dashboard" is included.
|
||||
assert.Subset(t, out.Dashboard["my_dashboard"], map[string]any{
|
||||
"serialized_dashboard": `${file("C:\\foo\\bar\\baz\\dashboard.lvdash.json")}`,
|
||||
})
|
||||
|
||||
// Assert that the "file_path" doesn't carry over.
|
||||
assert.NotSubset(t, out.Dashboard["my_dashboard"], map[string]any{
|
||||
"file_path": `C:\foo\bar\baz\dashboard.lvdash.json`,
|
||||
})
|
||||
}
|
||||
|
||||
func TestConvertDashboardSerializedDashboardString(t *testing.T) {
|
||||
var src = resources.Dashboard{
|
||||
SerializedDashboard: `{ "json": true }`,
|
||||
}
|
||||
|
||||
vin, err := convert.FromTyped(src, dyn.NilValue)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
out := schema.NewResources()
|
||||
err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assert that the "serialized_dashboard" is included.
|
||||
assert.Subset(t, out.Dashboard["my_dashboard"], map[string]any{
|
||||
"serialized_dashboard": `{ "json": true }`,
|
||||
})
|
||||
}
|
||||
|
||||
func TestConvertDashboardSerializedDashboardAny(t *testing.T) {
|
||||
var src = resources.Dashboard{
|
||||
SerializedDashboard: map[string]any{
|
||||
"pages": []map[string]any{
|
||||
{
|
||||
"displayName": "New Page",
|
||||
"layout": []map[string]any{},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
vin, err := convert.FromTyped(src, dyn.NilValue)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
out := schema.NewResources()
|
||||
err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assert that the "serialized_dashboard" is included.
|
||||
assert.Subset(t, out.Dashboard["my_dashboard"], map[string]any{
|
||||
"serialized_dashboard": `{"pages":[{"displayName":"New Page","layout":[]}]}`,
|
||||
})
|
||||
}
|
|
@ -15,8 +15,8 @@ import (
|
|||
|
||||
func TestConvertQualityMonitor(t *testing.T) {
|
||||
var src = resources.QualityMonitor{
|
||||
TableName: "test_table_name",
|
||||
CreateMonitor: &catalog.CreateMonitor{
|
||||
TableName: "test_table_name",
|
||||
AssetsDir: "assets_dir",
|
||||
OutputSchemaName: "output_schema_name",
|
||||
InferenceLog: &catalog.MonitorInferenceLog{
|
||||
|
|
|
@ -13,7 +13,7 @@ import (
|
|||
|
||||
// Partial representation of the Terraform state file format.
|
||||
// We are only interested in the global version and serial numbers,
|
||||
// plus resource types, names, modes, and ids.
|
||||
// plus resource types, names, modes, IDs, and ETags (for dashboards).
|
||||
type resourcesState struct {
|
||||
Version int `json:"version"`
|
||||
Resources []stateResource `json:"resources"`
|
||||
|
@ -33,7 +33,8 @@ type stateResourceInstance struct {
|
|||
}
|
||||
|
||||
type stateInstanceAttributes struct {
|
||||
ID string `json:"id"`
|
||||
ID string `json:"id"`
|
||||
ETag string `json:"etag,omitempty"`
|
||||
}
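Only the fields declared above are decoded; encoding/json silently drops everything else in the state file. A standalone sketch with local copies of these structs (not the package's actual parser) showing the new etag attribute being picked up:

package main

import (
	"encoding/json"
	"fmt"
)

// Local, trimmed copies of the state structs shown above.
type state struct {
	Version   int        `json:"version"`
	Resources []resource `json:"resources"`
}

type resource struct {
	Type      string     `json:"type"`
	Name      string     `json:"name"`
	Instances []instance `json:"instances"`
}

type instance struct {
	Attributes struct {
		ID   string `json:"id"`
		ETag string `json:"etag,omitempty"`
	} `json:"attributes"`
}

func main() {
	data := []byte(`{"version": 4, "resources": [{"type": "databricks_dashboard", "name": "dash1", "instances": [{"attributes": {"id": "id1", "etag": "1000", "display_name": "ignored"}}]}]}`)
	var s state
	if err := json.Unmarshal(data, &s); err != nil {
		panic(err)
	}
	fmt.Println(s.Resources[0].Instances[0].Attributes.ETag) // 1000
}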
|
||||
|
||||
func ParseResourcesState(ctx context.Context, b *bundle.Bundle) (*resourcesState, error) {
|
||||
|
|
|
@ -0,0 +1,20 @@
|
|||
package bundletest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/cli/libs/dyn"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func Mutate(t *testing.T, b *bundle.Bundle, f func(v dyn.Value) (dyn.Value, error)) {
|
||||
diags := bundle.ApplyFunc(context.Background(), b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
err := b.Config.Mutate(f)
|
||||
require.NoError(t, err)
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, diags.Error())
|
||||
}
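A hypothetical usage sketch for the new Mutate helper; the test name, the import path of the bundletest package, and the no-op transform are illustrative assumptions rather than part of this commit:

package bundletest_test

import (
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/internal/bundletest"
	"github.com/databricks/cli/libs/dyn"
)

func TestExampleUsesMutate(t *testing.T) {
	b := &bundle.Bundle{Config: config.Root{}}

	// A real test would transform v; returning it unchanged is also valid.
	bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
		return v, nil
	})
}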
|
|
@ -4,6 +4,7 @@ bundle:
|
|||
resources:
|
||||
quality_monitors:
|
||||
myqualitymonitor:
|
||||
table_name: catalog.schema.quality_monitor
|
||||
inference_log:
|
||||
granularities:
|
||||
- a
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
package schema
|
||||
|
||||
const ProviderVersion = "1.53.0"
|
||||
const ProviderVersion = "1.58.0"
|
||||
|
|
|
@ -0,0 +1,98 @@
|
|||
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
|
||||
|
||||
package schema
|
||||
|
||||
type DataSourceFunctionsFunctionsInputParamsParameters struct {
|
||||
Comment string `json:"comment,omitempty"`
|
||||
Name string `json:"name"`
|
||||
ParameterDefault string `json:"parameter_default,omitempty"`
|
||||
ParameterMode string `json:"parameter_mode,omitempty"`
|
||||
ParameterType string `json:"parameter_type,omitempty"`
|
||||
Position int `json:"position"`
|
||||
TypeIntervalType string `json:"type_interval_type,omitempty"`
|
||||
TypeJson string `json:"type_json,omitempty"`
|
||||
TypeName string `json:"type_name"`
|
||||
TypePrecision int `json:"type_precision,omitempty"`
|
||||
TypeScale int `json:"type_scale,omitempty"`
|
||||
TypeText string `json:"type_text"`
|
||||
}
|
||||
|
||||
type DataSourceFunctionsFunctionsInputParams struct {
|
||||
Parameters []DataSourceFunctionsFunctionsInputParamsParameters `json:"parameters,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceFunctionsFunctionsReturnParamsParameters struct {
|
||||
Comment string `json:"comment,omitempty"`
|
||||
Name string `json:"name"`
|
||||
ParameterDefault string `json:"parameter_default,omitempty"`
|
||||
ParameterMode string `json:"parameter_mode,omitempty"`
|
||||
ParameterType string `json:"parameter_type,omitempty"`
|
||||
Position int `json:"position"`
|
||||
TypeIntervalType string `json:"type_interval_type,omitempty"`
|
||||
TypeJson string `json:"type_json,omitempty"`
|
||||
TypeName string `json:"type_name"`
|
||||
TypePrecision int `json:"type_precision,omitempty"`
|
||||
TypeScale int `json:"type_scale,omitempty"`
|
||||
TypeText string `json:"type_text"`
|
||||
}
|
||||
|
||||
type DataSourceFunctionsFunctionsReturnParams struct {
|
||||
Parameters []DataSourceFunctionsFunctionsReturnParamsParameters `json:"parameters,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceFunctionsFunctionsRoutineDependenciesDependenciesFunction struct {
|
||||
FunctionFullName string `json:"function_full_name"`
|
||||
}
|
||||
|
||||
type DataSourceFunctionsFunctionsRoutineDependenciesDependenciesTable struct {
|
||||
TableFullName string `json:"table_full_name"`
|
||||
}
|
||||
|
||||
type DataSourceFunctionsFunctionsRoutineDependenciesDependencies struct {
|
||||
Function []DataSourceFunctionsFunctionsRoutineDependenciesDependenciesFunction `json:"function,omitempty"`
|
||||
Table []DataSourceFunctionsFunctionsRoutineDependenciesDependenciesTable `json:"table,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceFunctionsFunctionsRoutineDependencies struct {
|
||||
Dependencies []DataSourceFunctionsFunctionsRoutineDependenciesDependencies `json:"dependencies,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceFunctionsFunctions struct {
|
||||
BrowseOnly bool `json:"browse_only,omitempty"`
|
||||
CatalogName string `json:"catalog_name,omitempty"`
|
||||
Comment string `json:"comment,omitempty"`
|
||||
CreatedAt int `json:"created_at,omitempty"`
|
||||
CreatedBy string `json:"created_by,omitempty"`
|
||||
DataType string `json:"data_type,omitempty"`
|
||||
ExternalLanguage string `json:"external_language,omitempty"`
|
||||
ExternalName string `json:"external_name,omitempty"`
|
||||
FullDataType string `json:"full_data_type,omitempty"`
|
||||
FullName string `json:"full_name,omitempty"`
|
||||
FunctionId string `json:"function_id,omitempty"`
|
||||
IsDeterministic bool `json:"is_deterministic,omitempty"`
|
||||
IsNullCall bool `json:"is_null_call,omitempty"`
|
||||
MetastoreId string `json:"metastore_id,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Owner string `json:"owner,omitempty"`
|
||||
ParameterStyle string `json:"parameter_style,omitempty"`
|
||||
Properties string `json:"properties,omitempty"`
|
||||
RoutineBody string `json:"routine_body,omitempty"`
|
||||
RoutineDefinition string `json:"routine_definition,omitempty"`
|
||||
SchemaName string `json:"schema_name,omitempty"`
|
||||
SecurityType string `json:"security_type,omitempty"`
|
||||
SpecificName string `json:"specific_name,omitempty"`
|
||||
SqlDataAccess string `json:"sql_data_access,omitempty"`
|
||||
SqlPath string `json:"sql_path,omitempty"`
|
||||
UpdatedAt int `json:"updated_at,omitempty"`
|
||||
UpdatedBy string `json:"updated_by,omitempty"`
|
||||
InputParams []DataSourceFunctionsFunctionsInputParams `json:"input_params,omitempty"`
|
||||
ReturnParams []DataSourceFunctionsFunctionsReturnParams `json:"return_params,omitempty"`
|
||||
RoutineDependencies []DataSourceFunctionsFunctionsRoutineDependencies `json:"routine_dependencies,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceFunctions struct {
|
||||
CatalogName string `json:"catalog_name"`
|
||||
IncludeBrowse bool `json:"include_browse,omitempty"`
|
||||
SchemaName string `json:"schema_name"`
|
||||
Functions []DataSourceFunctionsFunctions `json:"functions,omitempty"`
|
||||
}
|
|
@ -0,0 +1,15 @@
|
|||
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
|
||||
|
||||
package schema
|
||||
|
||||
type DataSourceNotificationDestinationsNotificationDestinations struct {
|
||||
DestinationType string `json:"destination_type,omitempty"`
|
||||
DisplayName string `json:"display_name,omitempty"`
|
||||
Id string `json:"id,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceNotificationDestinations struct {
|
||||
DisplayNameContains string `json:"display_name_contains,omitempty"`
|
||||
Type string `json:"type,omitempty"`
|
||||
NotificationDestinations []DataSourceNotificationDestinationsNotificationDestinations `json:"notification_destinations,omitempty"`
|
||||
}
|
|
@ -0,0 +1,32 @@
|
|||
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
|
||||
|
||||
package schema
|
||||
|
||||
type DataSourceRegisteredModelModelInfoAliases struct {
|
||||
AliasName string `json:"alias_name,omitempty"`
|
||||
VersionNum int `json:"version_num,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceRegisteredModelModelInfo struct {
|
||||
BrowseOnly bool `json:"browse_only,omitempty"`
|
||||
CatalogName string `json:"catalog_name,omitempty"`
|
||||
Comment string `json:"comment,omitempty"`
|
||||
CreatedAt int `json:"created_at,omitempty"`
|
||||
CreatedBy string `json:"created_by,omitempty"`
|
||||
FullName string `json:"full_name,omitempty"`
|
||||
MetastoreId string `json:"metastore_id,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Owner string `json:"owner,omitempty"`
|
||||
SchemaName string `json:"schema_name,omitempty"`
|
||||
StorageLocation string `json:"storage_location,omitempty"`
|
||||
UpdatedAt int `json:"updated_at,omitempty"`
|
||||
UpdatedBy string `json:"updated_by,omitempty"`
|
||||
Aliases []DataSourceRegisteredModelModelInfoAliases `json:"aliases,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceRegisteredModel struct {
|
||||
FullName string `json:"full_name"`
|
||||
IncludeAliases bool `json:"include_aliases,omitempty"`
|
||||
IncludeBrowse bool `json:"include_browse,omitempty"`
|
||||
ModelInfo []DataSourceRegisteredModelModelInfo `json:"model_info,omitempty"`
|
||||
}
|
|
@ -35,6 +35,7 @@ type DataSourceStorageCredentialStorageCredentialInfo struct {
|
|||
Comment string `json:"comment,omitempty"`
|
||||
CreatedAt int `json:"created_at,omitempty"`
|
||||
CreatedBy string `json:"created_by,omitempty"`
|
||||
FullName string `json:"full_name,omitempty"`
|
||||
Id string `json:"id,omitempty"`
|
||||
IsolationMode string `json:"isolation_mode,omitempty"`
|
||||
MetastoreId string `json:"metastore_id,omitempty"`
|
||||
|
|
|
@ -4,7 +4,6 @@ package schema
|
|||
|
||||
type DataSourceVolumes struct {
|
||||
CatalogName string `json:"catalog_name"`
|
||||
Id string `json:"id,omitempty"`
|
||||
Ids []string `json:"ids,omitempty"`
|
||||
SchemaName string `json:"schema_name"`
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@ type DataSources struct {
|
|||
Directory map[string]any `json:"databricks_directory,omitempty"`
|
||||
ExternalLocation map[string]any `json:"databricks_external_location,omitempty"`
|
||||
ExternalLocations map[string]any `json:"databricks_external_locations,omitempty"`
|
||||
Functions map[string]any `json:"databricks_functions,omitempty"`
|
||||
Group map[string]any `json:"databricks_group,omitempty"`
|
||||
InstancePool map[string]any `json:"databricks_instance_pool,omitempty"`
|
||||
InstanceProfiles map[string]any `json:"databricks_instance_profiles,omitempty"`
|
||||
|
@ -36,7 +37,9 @@ type DataSources struct {
|
|||
NodeType map[string]any `json:"databricks_node_type,omitempty"`
|
||||
Notebook map[string]any `json:"databricks_notebook,omitempty"`
|
||||
NotebookPaths map[string]any `json:"databricks_notebook_paths,omitempty"`
|
||||
NotificationDestinations map[string]any `json:"databricks_notification_destinations,omitempty"`
|
||||
Pipelines map[string]any `json:"databricks_pipelines,omitempty"`
|
||||
RegisteredModel map[string]any `json:"databricks_registered_model,omitempty"`
|
||||
Schema map[string]any `json:"databricks_schema,omitempty"`
|
||||
Schemas map[string]any `json:"databricks_schemas,omitempty"`
|
||||
ServicePrincipal map[string]any `json:"databricks_service_principal,omitempty"`
|
||||
|
@ -77,6 +80,7 @@ func NewDataSources() *DataSources {
|
|||
Directory: make(map[string]any),
|
||||
ExternalLocation: make(map[string]any),
|
||||
ExternalLocations: make(map[string]any),
|
||||
Functions: make(map[string]any),
|
||||
Group: make(map[string]any),
|
||||
InstancePool: make(map[string]any),
|
||||
InstanceProfiles: make(map[string]any),
|
||||
|
@ -92,7 +96,9 @@ func NewDataSources() *DataSources {
|
|||
NodeType: make(map[string]any),
|
||||
Notebook: make(map[string]any),
|
||||
NotebookPaths: make(map[string]any),
|
||||
NotificationDestinations: make(map[string]any),
|
||||
Pipelines: make(map[string]any),
|
||||
RegisteredModel: make(map[string]any),
|
||||
Schema: make(map[string]any),
|
||||
Schemas: make(map[string]any),
|
||||
ServicePrincipal: make(map[string]any),
|
||||
|
|
|
@ -0,0 +1,46 @@
|
|||
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
|
||||
|
||||
package schema
|
||||
|
||||
type ResourceAlertConditionOperandColumn struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type ResourceAlertConditionOperand struct {
|
||||
Column *ResourceAlertConditionOperandColumn `json:"column,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceAlertConditionThresholdValue struct {
|
||||
BoolValue bool `json:"bool_value,omitempty"`
|
||||
DoubleValue int `json:"double_value,omitempty"`
|
||||
StringValue string `json:"string_value,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceAlertConditionThreshold struct {
|
||||
Value *ResourceAlertConditionThresholdValue `json:"value,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceAlertCondition struct {
|
||||
EmptyResultState string `json:"empty_result_state,omitempty"`
|
||||
Op string `json:"op"`
|
||||
Operand *ResourceAlertConditionOperand `json:"operand,omitempty"`
|
||||
Threshold *ResourceAlertConditionThreshold `json:"threshold,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceAlert struct {
|
||||
CreateTime string `json:"create_time,omitempty"`
|
||||
CustomBody string `json:"custom_body,omitempty"`
|
||||
CustomSubject string `json:"custom_subject,omitempty"`
|
||||
DisplayName string `json:"display_name"`
|
||||
Id string `json:"id,omitempty"`
|
||||
LifecycleState string `json:"lifecycle_state,omitempty"`
|
||||
NotifyOnOk bool `json:"notify_on_ok,omitempty"`
|
||||
OwnerUserName string `json:"owner_user_name,omitempty"`
|
||||
ParentPath string `json:"parent_path,omitempty"`
|
||||
QueryId string `json:"query_id"`
|
||||
SecondsToRetrigger int `json:"seconds_to_retrigger,omitempty"`
|
||||
State string `json:"state,omitempty"`
|
||||
TriggerTime string `json:"trigger_time,omitempty"`
|
||||
UpdateTime string `json:"update_time,omitempty"`
|
||||
Condition *ResourceAlertCondition `json:"condition,omitempty"`
|
||||
}
|
|
@ -0,0 +1,23 @@
|
|||
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
|
||||
|
||||
package schema
|
||||
|
||||
type ResourceCustomAppIntegrationTokenAccessPolicy struct {
|
||||
AccessTokenTtlInMinutes int `json:"access_token_ttl_in_minutes,omitempty"`
|
||||
RefreshTokenTtlInMinutes int `json:"refresh_token_ttl_in_minutes,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceCustomAppIntegration struct {
|
||||
ClientId string `json:"client_id,omitempty"`
|
||||
ClientSecret string `json:"client_secret,omitempty"`
|
||||
Confidential bool `json:"confidential,omitempty"`
|
||||
CreateTime string `json:"create_time,omitempty"`
|
||||
CreatedBy int `json:"created_by,omitempty"`
|
||||
CreatorUsername string `json:"creator_username,omitempty"`
|
||||
Id string `json:"id,omitempty"`
|
||||
IntegrationId string `json:"integration_id,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
RedirectUrls []string `json:"redirect_urls,omitempty"`
|
||||
Scopes []string `json:"scopes,omitempty"`
|
||||
TokenAccessPolicy *ResourceCustomAppIntegrationTokenAccessPolicy `json:"token_access_policy,omitempty"`
|
||||
}
|
|
@ -1448,6 +1448,7 @@ type ResourceJobWebhookNotifications struct {
|
|||
|
||||
type ResourceJob struct {
|
||||
AlwaysRunning bool `json:"always_running,omitempty"`
|
||||
BudgetPolicyId string `json:"budget_policy_id,omitempty"`
|
||||
ControlRunState bool `json:"control_run_state,omitempty"`
|
||||
Description string `json:"description,omitempty"`
|
||||
EditMode string `json:"edit_mode,omitempty"`
|
||||
|
|
|
@ -19,13 +19,13 @@ type ResourceLibraryPypi struct {
|
|||
}
|
||||
|
||||
type ResourceLibrary struct {
|
||||
ClusterId string `json:"cluster_id"`
|
||||
Egg string `json:"egg,omitempty"`
|
||||
Id string `json:"id,omitempty"`
|
||||
Jar string `json:"jar,omitempty"`
|
||||
Requirements string `json:"requirements,omitempty"`
|
||||
Whl string `json:"whl,omitempty"`
|
||||
Cran *ResourceLibraryCran `json:"cran,omitempty"`
|
||||
Maven *ResourceLibraryMaven `json:"maven,omitempty"`
|
||||
Pypi *ResourceLibraryPypi `json:"pypi,omitempty"`
|
||||
ClusterId string `json:"cluster_id"`
|
||||
Egg string `json:"egg,omitempty"`
|
||||
Id string `json:"id,omitempty"`
|
||||
Jar string `json:"jar,omitempty"`
|
||||
Requirements string `json:"requirements,omitempty"`
|
||||
Whl string `json:"whl,omitempty"`
|
||||
Cran []ResourceLibraryCran `json:"cran,omitempty"`
|
||||
Maven []ResourceLibraryMaven `json:"maven,omitempty"`
|
||||
Pypi []ResourceLibraryPypi `json:"pypi,omitempty"`
|
||||
}
|
||||
|
|
|
@ -19,9 +19,10 @@ type ResourceOnlineTableSpec struct {
|
|||
}
|
||||
|
||||
type ResourceOnlineTable struct {
|
||||
Id string `json:"id,omitempty"`
|
||||
Name string `json:"name"`
|
||||
Status []any `json:"status,omitempty"`
|
||||
TableServingUrl string `json:"table_serving_url,omitempty"`
|
||||
Spec *ResourceOnlineTableSpec `json:"spec,omitempty"`
|
||||
Id string `json:"id,omitempty"`
|
||||
Name string `json:"name"`
|
||||
Status []any `json:"status,omitempty"`
|
||||
TableServingUrl string `json:"table_serving_url,omitempty"`
|
||||
UnityCatalogProvisioningState string `json:"unity_catalog_provisioning_state,omitempty"`
|
||||
Spec *ResourceOnlineTableSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
|
|
@ -137,15 +137,32 @@ type ResourcePipelineFilters struct {
|
|||
|
||||
type ResourcePipelineGatewayDefinition struct {
|
||||
ConnectionId string `json:"connection_id,omitempty"`
|
||||
ConnectionName string `json:"connection_name,omitempty"`
|
||||
GatewayStorageCatalog string `json:"gateway_storage_catalog,omitempty"`
|
||||
GatewayStorageName string `json:"gateway_storage_name,omitempty"`
|
||||
GatewayStorageSchema string `json:"gateway_storage_schema,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineIngestionDefinitionObjectsReportTableConfiguration struct {
|
||||
PrimaryKeys []string `json:"primary_keys,omitempty"`
|
||||
SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"`
|
||||
ScdType string `json:"scd_type,omitempty"`
|
||||
SequenceBy []string `json:"sequence_by,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineIngestionDefinitionObjectsReport struct {
|
||||
DestinationCatalog string `json:"destination_catalog,omitempty"`
|
||||
DestinationSchema string `json:"destination_schema,omitempty"`
|
||||
DestinationTable string `json:"destination_table,omitempty"`
|
||||
SourceUrl string `json:"source_url,omitempty"`
|
||||
TableConfiguration *ResourcePipelineIngestionDefinitionObjectsReportTableConfiguration `json:"table_configuration,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineIngestionDefinitionObjectsSchemaTableConfiguration struct {
|
||||
PrimaryKeys []string `json:"primary_keys,omitempty"`
|
||||
SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"`
|
||||
ScdType string `json:"scd_type,omitempty"`
|
||||
SequenceBy []string `json:"sequence_by,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineIngestionDefinitionObjectsSchema struct {
|
||||
|
@ -160,6 +177,7 @@ type ResourcePipelineIngestionDefinitionObjectsTableTableConfiguration struct {
|
|||
PrimaryKeys []string `json:"primary_keys,omitempty"`
|
||||
SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"`
|
||||
ScdType string `json:"scd_type,omitempty"`
|
||||
SequenceBy []string `json:"sequence_by,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineIngestionDefinitionObjectsTable struct {
|
||||
|
@ -173,6 +191,7 @@ type ResourcePipelineIngestionDefinitionObjectsTable struct {
|
|||
}
|
||||
|
||||
type ResourcePipelineIngestionDefinitionObjects struct {
|
||||
Report *ResourcePipelineIngestionDefinitionObjectsReport `json:"report,omitempty"`
|
||||
Schema *ResourcePipelineIngestionDefinitionObjectsSchema `json:"schema,omitempty"`
|
||||
Table *ResourcePipelineIngestionDefinitionObjectsTable `json:"table,omitempty"`
|
||||
}
|
||||
|
@ -181,6 +200,7 @@ type ResourcePipelineIngestionDefinitionTableConfiguration struct {
|
|||
PrimaryKeys []string `json:"primary_keys,omitempty"`
|
||||
SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"`
|
||||
ScdType string `json:"scd_type,omitempty"`
|
||||
SequenceBy []string `json:"sequence_by,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineIngestionDefinition struct {
|
||||
|
@ -223,6 +243,12 @@ type ResourcePipelineNotification struct {
|
|||
EmailRecipients []string `json:"email_recipients,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineRestartWindow struct {
|
||||
DaysOfWeek string `json:"days_of_week,omitempty"`
|
||||
StartHour int `json:"start_hour"`
|
||||
TimeZoneId string `json:"time_zone_id,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineTriggerCron struct {
|
||||
QuartzCronSchedule string `json:"quartz_cron_schedule,omitempty"`
|
||||
TimezoneId string `json:"timezone_id,omitempty"`
|
||||
|
@ -269,5 +295,6 @@ type ResourcePipeline struct {
|
|||
LatestUpdates []ResourcePipelineLatestUpdates `json:"latest_updates,omitempty"`
|
||||
Library []ResourcePipelineLibrary `json:"library,omitempty"`
|
||||
Notification []ResourcePipelineNotification `json:"notification,omitempty"`
|
||||
RestartWindow *ResourcePipelineRestartWindow `json:"restart_window,omitempty"`
|
||||
Trigger *ResourcePipelineTrigger `json:"trigger,omitempty"`
|
||||
}
|
||||
|
|
|
@ -0,0 +1,84 @@
|
|||
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
|
||||
|
||||
package schema
|
||||
|
||||
type ResourceQueryParameterDateRangeValueDateRangeValue struct {
|
||||
End string `json:"end"`
|
||||
Start string `json:"start"`
|
||||
}
|
||||
|
||||
type ResourceQueryParameterDateRangeValue struct {
|
||||
DynamicDateRangeValue string `json:"dynamic_date_range_value,omitempty"`
|
||||
Precision string `json:"precision,omitempty"`
|
||||
StartDayOfWeek int `json:"start_day_of_week,omitempty"`
|
||||
DateRangeValue *ResourceQueryParameterDateRangeValueDateRangeValue `json:"date_range_value,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceQueryParameterDateValue struct {
|
||||
DateValue string `json:"date_value,omitempty"`
|
||||
DynamicDateValue string `json:"dynamic_date_value,omitempty"`
|
||||
Precision string `json:"precision,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceQueryParameterEnumValueMultiValuesOptions struct {
|
||||
Prefix string `json:"prefix,omitempty"`
|
||||
Separator string `json:"separator,omitempty"`
|
||||
Suffix string `json:"suffix,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceQueryParameterEnumValue struct {
|
||||
EnumOptions string `json:"enum_options,omitempty"`
|
||||
Values []string `json:"values,omitempty"`
|
||||
MultiValuesOptions *ResourceQueryParameterEnumValueMultiValuesOptions `json:"multi_values_options,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceQueryParameterNumericValue struct {
|
||||
Value int `json:"value"`
|
||||
}
|
||||
|
||||
type ResourceQueryParameterQueryBackedValueMultiValuesOptions struct {
|
||||
Prefix string `json:"prefix,omitempty"`
|
||||
Separator string `json:"separator,omitempty"`
|
||||
Suffix string `json:"suffix,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceQueryParameterQueryBackedValue struct {
|
||||
QueryId string `json:"query_id"`
|
||||
Values []string `json:"values,omitempty"`
|
||||
MultiValuesOptions *ResourceQueryParameterQueryBackedValueMultiValuesOptions `json:"multi_values_options,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceQueryParameterTextValue struct {
|
||||
Value string `json:"value"`
|
||||
}
|
||||
|
||||
type ResourceQueryParameter struct {
|
||||
Name string `json:"name"`
|
||||
Title string `json:"title,omitempty"`
|
||||
DateRangeValue *ResourceQueryParameterDateRangeValue `json:"date_range_value,omitempty"`
|
||||
DateValue *ResourceQueryParameterDateValue `json:"date_value,omitempty"`
|
||||
EnumValue *ResourceQueryParameterEnumValue `json:"enum_value,omitempty"`
|
||||
NumericValue *ResourceQueryParameterNumericValue `json:"numeric_value,omitempty"`
|
||||
QueryBackedValue *ResourceQueryParameterQueryBackedValue `json:"query_backed_value,omitempty"`
|
||||
TextValue *ResourceQueryParameterTextValue `json:"text_value,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceQuery struct {
|
||||
ApplyAutoLimit bool `json:"apply_auto_limit,omitempty"`
|
||||
Catalog string `json:"catalog,omitempty"`
|
||||
CreateTime string `json:"create_time,omitempty"`
|
||||
Description string `json:"description,omitempty"`
|
||||
DisplayName string `json:"display_name"`
|
||||
Id string `json:"id,omitempty"`
|
||||
LastModifierUserName string `json:"last_modifier_user_name,omitempty"`
|
||||
LifecycleState string `json:"lifecycle_state,omitempty"`
|
||||
OwnerUserName string `json:"owner_user_name,omitempty"`
|
||||
ParentPath string `json:"parent_path,omitempty"`
|
||||
QueryText string `json:"query_text"`
|
||||
RunAsMode string `json:"run_as_mode,omitempty"`
|
||||
Schema string `json:"schema,omitempty"`
|
||||
Tags []string `json:"tags,omitempty"`
|
||||
UpdateTime string `json:"update_time,omitempty"`
|
||||
WarehouseId string `json:"warehouse_id"`
|
||||
Parameter []ResourceQueryParameter `json:"parameter,omitempty"`
|
||||
}
|
|
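For orientation, a hedged sketch of how the generated ResourceQuery and ResourceQueryParameter types nest; all values are made up, and the import path for the generated schema package is an assumption:

package main

import (
	"encoding/json"
	"fmt"

	// Assumed import path for the generated Terraform schema package.
	"github.com/databricks/cli/bundle/internal/tf/schema"
)

func main() {
	q := schema.ResourceQuery{
		DisplayName: "daily_orders", // hypothetical query name
		QueryText:   "SELECT * FROM main.default.orders WHERE day = :day",
		WarehouseId: "abc123",
		Parameter: []schema.ResourceQueryParameter{
			{
				Name:      "day",
				TextValue: &schema.ResourceQueryParameterTextValue{Value: "2024-01-01"},
			},
		},
	}
	out, _ := json.Marshal(q)
	fmt.Println(string(out))
}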
@ -4,6 +4,7 @@ package schema
|
|||
|
||||
type Resources struct {
|
||||
AccessControlRuleSet map[string]any `json:"databricks_access_control_rule_set,omitempty"`
|
||||
Alert map[string]any `json:"databricks_alert,omitempty"`
|
||||
ArtifactAllowlist map[string]any `json:"databricks_artifact_allowlist,omitempty"`
|
||||
AutomaticClusterUpdateWorkspaceSetting map[string]any `json:"databricks_automatic_cluster_update_workspace_setting,omitempty"`
|
||||
AwsS3Mount map[string]any `json:"databricks_aws_s3_mount,omitempty"`
|
||||
|
@ -17,6 +18,7 @@ type Resources struct {
|
|||
ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"`
|
||||
ComplianceSecurityProfileWorkspaceSetting map[string]any `json:"databricks_compliance_security_profile_workspace_setting,omitempty"`
|
||||
Connection map[string]any `json:"databricks_connection,omitempty"`
|
||||
CustomAppIntegration map[string]any `json:"databricks_custom_app_integration,omitempty"`
|
||||
Dashboard map[string]any `json:"databricks_dashboard,omitempty"`
|
||||
DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"`
|
||||
DefaultNamespaceSetting map[string]any `json:"databricks_default_namespace_setting,omitempty"`
|
||||
|
@ -68,6 +70,7 @@ type Resources struct {
|
|||
Pipeline map[string]any `json:"databricks_pipeline,omitempty"`
|
||||
Provider map[string]any `json:"databricks_provider,omitempty"`
|
||||
QualityMonitor map[string]any `json:"databricks_quality_monitor,omitempty"`
|
||||
Query map[string]any `json:"databricks_query,omitempty"`
|
||||
Recipient map[string]any `json:"databricks_recipient,omitempty"`
|
||||
RegisteredModel map[string]any `json:"databricks_registered_model,omitempty"`
|
||||
Repo map[string]any `json:"databricks_repo,omitempty"`
|
||||
|
@ -107,6 +110,7 @@ type Resources struct {
|
|||
func NewResources() *Resources {
|
||||
return &Resources{
|
||||
AccessControlRuleSet: make(map[string]any),
|
||||
Alert: make(map[string]any),
|
||||
ArtifactAllowlist: make(map[string]any),
|
||||
AutomaticClusterUpdateWorkspaceSetting: make(map[string]any),
|
||||
AwsS3Mount: make(map[string]any),
|
||||
|
@ -120,6 +124,7 @@ func NewResources() *Resources {
|
|||
ClusterPolicy: make(map[string]any),
|
||||
ComplianceSecurityProfileWorkspaceSetting: make(map[string]any),
|
||||
Connection: make(map[string]any),
|
||||
CustomAppIntegration: make(map[string]any),
|
||||
Dashboard: make(map[string]any),
|
||||
DbfsFile: make(map[string]any),
|
||||
DefaultNamespaceSetting: make(map[string]any),
|
||||
|
@ -171,6 +176,7 @@ func NewResources() *Resources {
|
|||
Pipeline: make(map[string]any),
|
||||
Provider: make(map[string]any),
|
||||
QualityMonitor: make(map[string]any),
|
||||
Query: make(map[string]any),
|
||||
Recipient: make(map[string]any),
|
||||
RegisteredModel: make(map[string]any),
|
||||
Repo: make(map[string]any),
|
||||
|
|
|
@ -21,7 +21,7 @@ type Root struct {

const ProviderHost = "registry.terraform.io"
const ProviderSource = "databricks/databricks"
const ProviderVersion = "1.53.0"
const ProviderVersion = "1.58.0"

func NewRoot() *Root {
	return &Root{

@ -36,3 +36,13 @@ func IsWorkspaceLibrary(library *compute.Library) bool {
	return IsWorkspacePath(path)
}

// IsVolumesPath returns true if the specified path indicates that
// it should be interpreted as a Databricks Volumes path.
func IsVolumesPath(path string) bool {
	return strings.HasPrefix(path, "/Volumes/")
}

func IsWorkspaceSharedPath(path string) bool {
	return strings.HasPrefix(path, "/Workspace/Shared/")
}

@ -31,3 +31,13 @@ func TestIsWorkspaceLibrary(t *testing.T) {
	// Empty.
	assert.False(t, IsWorkspaceLibrary(&compute.Library{}))
}

func TestIsVolumesPath(t *testing.T) {
	// Absolute paths with particular prefixes.
	assert.True(t, IsVolumesPath("/Volumes/path/to/package"))

	// Relative paths.
	assert.False(t, IsVolumesPath("myfile.txt"))
	assert.False(t, IsVolumesPath("./myfile.txt"))
	assert.False(t, IsVolumesPath("../myfile.txt"))
}
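IsWorkspaceSharedPath is not exercised by the test above; a minimal sketch of an analogous test, assuming it lives in the same package with the same assert import:

func TestIsWorkspaceSharedPath(t *testing.T) {
	// Absolute paths under the shared workspace folder.
	assert.True(t, IsWorkspaceSharedPath("/Workspace/Shared/my_project"))

	// Other workspace paths and relative paths do not qualify.
	assert.False(t, IsWorkspaceSharedPath("/Workspace/Users/foo@bar.com"))
	assert.False(t, IsWorkspaceSharedPath("Shared/my_project"))
}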
@ -0,0 +1,39 @@
package paths

import (
	"strings"

	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/libraries"
)

func CollectUniqueWorkspacePathPrefixes(workspace config.Workspace) []string {
	rootPath := workspace.RootPath
	paths := []string{}
	if !libraries.IsVolumesPath(rootPath) {
		paths = append(paths, rootPath)
	}

	if !strings.HasSuffix(rootPath, "/") {
		rootPath += "/"
	}

	for _, p := range []string{
		workspace.ArtifactPath,
		workspace.FilePath,
		workspace.StatePath,
		workspace.ResourcePath,
	} {
		if libraries.IsVolumesPath(p) {
			continue
		}

		if strings.HasPrefix(p, rootPath) {
			continue
		}

		paths = append(paths, p)
	}

	return paths
}
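To illustrate the behavior, a hedged call-site fragment; the paths are made up, and only config.Workspace and the helper above are taken from this change:

ws := config.Workspace{
	RootPath:     "/Workspace/Users/foo@bar.com/.bundle/dev",
	ArtifactPath: "/Volumes/main/default/artifacts",                // skipped: Volumes path
	FilePath:     "/Workspace/Users/foo@bar.com/.bundle/dev/files", // skipped: already under the root path
	StatePath:    "/Workspace/Users/foo@bar.com/.bundle/dev/state", // skipped: already under the root path
	ResourcePath: "/Workspace/Shared/dev/resources",                // kept: outside the root path
}

// Under these assumptions the result is:
//   ["/Workspace/Users/foo@bar.com/.bundle/dev", "/Workspace/Shared/dev/resources"]
prefixes := paths.CollectUniqueWorkspacePathPrefixes(ws)
_ = prefixes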
@ -39,6 +39,10 @@ var levelsMap = map[string](map[string]string){
|
|||
CAN_VIEW: "CAN_VIEW",
|
||||
CAN_RUN: "CAN_QUERY",
|
||||
},
|
||||
"dashboards": {
|
||||
CAN_MANAGE: "CAN_MANAGE",
|
||||
CAN_VIEW: "CAN_READ",
|
||||
},
|
||||
}
|
||||
|
||||
type bundlePermissions struct{}
|
||||
|
|
|
@ -3,9 +3,9 @@ package permissions
import (
	"context"
	"fmt"
	"strings"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/libraries"
	"github.com/databricks/cli/libs/diag"
)

@ -21,17 +21,13 @@ func (*validateSharedRootPermissions) Name() string {
}

func (*validateSharedRootPermissions) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	if isWorkspaceSharedRoot(b.Config.Workspace.RootPath) {
	if libraries.IsWorkspaceSharedPath(b.Config.Workspace.RootPath) {
		return isUsersGroupPermissionSet(b)
	}

	return nil
}

func isWorkspaceSharedRoot(path string) bool {
	return strings.HasPrefix(path, "/Workspace/Shared/")
}

// isUsersGroupPermissionSet checks that top-level permissions set for bundle contain group_name: users with CAN_MANAGE permission.
func isUsersGroupPermissionSet(b *bundle.Bundle) diag.Diagnostics {
	var diags diag.Diagnostics

@ -0,0 +1,89 @@
|
|||
package permissions
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/databricks-sdk-go/service/workspace"
|
||||
)
|
||||
|
||||
type WorkspacePathPermissions struct {
|
||||
Path string
|
||||
Permissions []resources.Permission
|
||||
}
|
||||
|
||||
func ObjectAclToResourcePermissions(path string, acl []workspace.WorkspaceObjectAccessControlResponse) *WorkspacePathPermissions {
|
||||
permissions := make([]resources.Permission, 0)
|
||||
for _, a := range acl {
|
||||
// Skip the admin group because it's added to all resources by default.
|
||||
if a.GroupName == "admins" {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, pl := range a.AllPermissions {
|
||||
permissions = append(permissions, resources.Permission{
|
||||
Level: convertWorkspaceObjectPermissionLevel(pl.PermissionLevel),
|
||||
GroupName: a.GroupName,
|
||||
UserName: a.UserName,
|
||||
ServicePrincipalName: a.ServicePrincipalName,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return &WorkspacePathPermissions{Permissions: permissions, Path: path}
|
||||
}
|
||||
|
||||
func (p WorkspacePathPermissions) Compare(perms []resources.Permission) diag.Diagnostics {
|
||||
var diags diag.Diagnostics
|
||||
|
||||
// Check the permissions in the workspace and see if they are all set in the bundle.
|
||||
ok, missing := containsAll(p.Permissions, perms)
|
||||
if !ok {
|
||||
diags = diags.Append(diag.Diagnostic{
|
||||
Severity: diag.Warning,
|
||||
Summary: "untracked permissions apply to target workspace path",
|
||||
Detail: fmt.Sprintf("The following permissions apply to the workspace folder at %q but are not configured in the bundle:\n%s", p.Path, toString(missing)),
|
||||
})
|
||||
}
|
||||
|
||||
return diags
|
||||
}
|
||||
|
||||
// containsAll checks if permA contains all permissions in permB.
|
||||
func containsAll(permA []resources.Permission, permB []resources.Permission) (bool, []resources.Permission) {
|
||||
missing := make([]resources.Permission, 0)
|
||||
for _, a := range permA {
|
||||
found := false
|
||||
for _, b := range permB {
|
||||
if a == b {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
missing = append(missing, a)
|
||||
}
|
||||
}
|
||||
return len(missing) == 0, missing
|
||||
}
|
||||
|
||||
// convertWorkspaceObjectPermissionLevel converts matching object permission levels to bundle ones.
|
||||
// If there is no matching permission level, it returns permission level as is, for example, CAN_EDIT.
|
||||
func convertWorkspaceObjectPermissionLevel(level workspace.WorkspaceObjectPermissionLevel) string {
|
||||
switch level {
|
||||
case workspace.WorkspaceObjectPermissionLevelCanRead:
|
||||
return CAN_VIEW
|
||||
default:
|
||||
return string(level)
|
||||
}
|
||||
}
|
||||
|
||||
func toString(p []resources.Permission) string {
|
||||
var sb strings.Builder
|
||||
for _, perm := range p {
|
||||
sb.WriteString(fmt.Sprintf("- %s\n", perm.String()))
|
||||
}
|
||||
return sb.String()
|
||||
}
|
|
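Before the test file below, a hedged sketch of the intended use of Compare; the ACL values and the path are illustrative, the types are the ones defined above:

// ACL as reported by the workspace for the target folder.
acl := []workspace.WorkspaceObjectAccessControlResponse{
	{
		UserName: "someone.else@company.com",
		AllPermissions: []workspace.WorkspaceObjectPermission{
			{PermissionLevel: "CAN_MANAGE"},
		},
	},
}

// Permissions declared at the top level of the bundle.
declared := []resources.Permission{
	{Level: CAN_MANAGE, UserName: "owner@company.com"},
}

wp := ObjectAclToResourcePermissions("/Workspace/Users/owner@company.com/.bundle/dev", acl)
diags := wp.Compare(declared)
// diags now holds a single warning listing the untracked CAN_MANAGE
// grant for someone.else@company.com on the workspace folder.
_ = diags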
@ -0,0 +1,121 @@
|
|||
package permissions
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/databricks-sdk-go/service/workspace"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestWorkspacePathPermissionsCompare(t *testing.T) {
|
||||
testCases := []struct {
|
||||
perms []resources.Permission
|
||||
acl []workspace.WorkspaceObjectAccessControlResponse
|
||||
expected diag.Diagnostics
|
||||
}{
|
||||
{
|
||||
perms: []resources.Permission{
|
||||
{Level: CAN_MANAGE, UserName: "foo@bar.com"},
|
||||
},
|
||||
acl: []workspace.WorkspaceObjectAccessControlResponse{
|
||||
{
|
||||
UserName: "foo@bar.com",
|
||||
AllPermissions: []workspace.WorkspaceObjectPermission{
|
||||
{PermissionLevel: "CAN_MANAGE"},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
perms: []resources.Permission{
|
||||
{Level: CAN_MANAGE, UserName: "foo@bar.com"},
|
||||
},
|
||||
acl: []workspace.WorkspaceObjectAccessControlResponse{
|
||||
{
|
||||
UserName: "foo@bar.com",
|
||||
AllPermissions: []workspace.WorkspaceObjectPermission{
|
||||
{PermissionLevel: "CAN_MANAGE"},
|
||||
},
|
||||
},
|
||||
{
|
||||
GroupName: "admins",
|
||||
AllPermissions: []workspace.WorkspaceObjectPermission{
|
||||
{PermissionLevel: "CAN_MANAGE"},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
perms: []resources.Permission{
|
||||
{Level: CAN_VIEW, UserName: "foo@bar.com"},
|
||||
{Level: CAN_MANAGE, ServicePrincipalName: "sp.com"},
|
||||
},
|
||||
acl: []workspace.WorkspaceObjectAccessControlResponse{
|
||||
{
|
||||
UserName: "foo@bar.com",
|
||||
AllPermissions: []workspace.WorkspaceObjectPermission{
|
||||
{PermissionLevel: "CAN_READ"},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
perms: []resources.Permission{
|
||||
{Level: CAN_MANAGE, UserName: "foo@bar.com"},
|
||||
},
|
||||
acl: []workspace.WorkspaceObjectAccessControlResponse{
|
||||
{
|
||||
UserName: "foo@bar.com",
|
||||
AllPermissions: []workspace.WorkspaceObjectPermission{
|
||||
{PermissionLevel: "CAN_MANAGE"},
|
||||
},
|
||||
},
|
||||
{
|
||||
GroupName: "foo",
|
||||
AllPermissions: []workspace.WorkspaceObjectPermission{
|
||||
{PermissionLevel: "CAN_MANAGE"},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: diag.Diagnostics{
|
||||
{
|
||||
Severity: diag.Warning,
|
||||
Summary: "untracked permissions apply to target workspace path",
|
||||
Detail: "The following permissions apply to the workspace folder at \"path\" but are not configured in the bundle:\n- level: CAN_MANAGE, group_name: foo\n",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
perms: []resources.Permission{
|
||||
{Level: CAN_MANAGE, UserName: "foo@bar.com"},
|
||||
},
|
||||
acl: []workspace.WorkspaceObjectAccessControlResponse{
|
||||
{
|
||||
UserName: "foo2@bar.com",
|
||||
AllPermissions: []workspace.WorkspaceObjectPermission{
|
||||
{PermissionLevel: "CAN_MANAGE"},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: diag.Diagnostics{
|
||||
{
|
||||
Severity: diag.Warning,
|
||||
Summary: "untracked permissions apply to target workspace path",
|
||||
Detail: "The following permissions apply to the workspace folder at \"path\" but are not configured in the bundle:\n- level: CAN_MANAGE, user_name: foo2@bar.com\n",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
wp := ObjectAclToResourcePermissions("path", tc.acl)
|
||||
diags := wp.Compare(tc.perms)
|
||||
require.Equal(t, tc.expected, diags)
|
||||
}
|
||||
|
||||
}
|
|
@ -5,8 +5,11 @@ import (
|
|||
"fmt"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/libraries"
|
||||
"github.com/databricks/cli/bundle/paths"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/databricks-sdk-go/service/workspace"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
type workspaceRootPermissions struct {
|
||||
|
@ -34,7 +37,7 @@ func giveAccessForWorkspaceRoot(ctx context.Context, b *bundle.Bundle) error {
|
|||
permissions := make([]workspace.WorkspaceObjectAccessControlRequest, 0)
|
||||
|
||||
for _, p := range b.Config.Permissions {
|
||||
level, err := getWorkspaceObjectPermissionLevel(p.Level)
|
||||
level, err := GetWorkspaceObjectPermissionLevel(p.Level)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -52,20 +55,39 @@ func giveAccessForWorkspaceRoot(ctx context.Context, b *bundle.Bundle) error {
|
|||
}
|
||||
|
||||
w := b.WorkspaceClient().Workspace
|
||||
obj, err := w.GetStatusByPath(ctx, b.Config.Workspace.RootPath)
|
||||
bundlePaths := paths.CollectUniqueWorkspacePathPrefixes(b.Config.Workspace)
|
||||
|
||||
g, ctx := errgroup.WithContext(ctx)
|
||||
for _, p := range bundlePaths {
|
||||
g.Go(func() error {
|
||||
return setPermissions(ctx, w, p, permissions)
|
||||
})
|
||||
}
|
||||
|
||||
return g.Wait()
|
||||
}
|
||||
|
||||
func setPermissions(ctx context.Context, w workspace.WorkspaceInterface, path string, permissions []workspace.WorkspaceObjectAccessControlRequest) error {
|
||||
// If the folder is shared, we don't need to set permissions since they are always set for all users, and this is checked in mutators beforehand.
|
||||
if libraries.IsWorkspaceSharedPath(path) {
|
||||
return nil
|
||||
}
|
||||
|
||||
obj, err := w.GetStatusByPath(ctx, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = w.UpdatePermissions(ctx, workspace.WorkspaceObjectPermissionsRequest{
|
||||
_, err = w.SetPermissions(ctx, workspace.WorkspaceObjectPermissionsRequest{
|
||||
WorkspaceObjectId: fmt.Sprint(obj.ObjectId),
|
||||
WorkspaceObjectType: "directories",
|
||||
AccessControlList: permissions,
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func getWorkspaceObjectPermissionLevel(bundlePermission string) (workspace.WorkspaceObjectPermissionLevel, error) {
|
||||
func GetWorkspaceObjectPermissionLevel(bundlePermission string) (workspace.WorkspaceObjectPermissionLevel, error) {
|
||||
switch bundlePermission {
|
||||
case CAN_MANAGE:
|
||||
return workspace.WorkspaceObjectPermissionLevelCanManage, nil
|
||||
|
|
|
@ -21,7 +21,11 @@ func TestApplyWorkspaceRootPermissions(t *testing.T) {
|
|||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
RootPath: "/Users/foo@bar.com",
|
||||
RootPath: "/Users/foo@bar.com",
|
||||
ArtifactPath: "/Users/foo@bar.com/artifacts",
|
||||
FilePath: "/Users/foo@bar.com/files",
|
||||
StatePath: "/Users/foo@bar.com/state",
|
||||
ResourcePath: "/Users/foo@bar.com/resources",
|
||||
},
|
||||
Permissions: []resources.Permission{
|
||||
{Level: CAN_MANAGE, UserName: "TestUser"},
|
||||
|
@ -59,7 +63,7 @@ func TestApplyWorkspaceRootPermissions(t *testing.T) {
|
|||
workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Users/foo@bar.com").Return(&workspace.ObjectInfo{
|
||||
ObjectId: 1234,
|
||||
}, nil)
|
||||
workspaceApi.EXPECT().UpdatePermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{
|
||||
workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{
|
||||
AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{
|
||||
{UserName: "TestUser", PermissionLevel: "CAN_MANAGE"},
|
||||
{GroupName: "TestGroup", PermissionLevel: "CAN_READ"},
|
||||
|
@ -72,3 +76,116 @@ func TestApplyWorkspaceRootPermissions(t *testing.T) {
|
|||
diags := bundle.Apply(context.Background(), b, bundle.Seq(ValidateSharedRootPermissions(), ApplyWorkspaceRootPermissions()))
|
||||
require.Empty(t, diags)
|
||||
}
|
||||
|
||||
func TestApplyWorkspaceRootPermissionsForAllPaths(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
RootPath: "/Some/Root/Path",
|
||||
ArtifactPath: "/Users/foo@bar.com/artifacts",
|
||||
FilePath: "/Users/foo@bar.com/files",
|
||||
StatePath: "/Users/foo@bar.com/state",
|
||||
ResourcePath: "/Users/foo@bar.com/resources",
|
||||
},
|
||||
Permissions: []resources.Permission{
|
||||
{Level: CAN_MANAGE, UserName: "TestUser"},
|
||||
{Level: CAN_VIEW, GroupName: "TestGroup"},
|
||||
{Level: CAN_RUN, ServicePrincipalName: "TestServicePrincipal"},
|
||||
},
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"job_1": {JobSettings: &jobs.JobSettings{Name: "job_1"}},
|
||||
"job_2": {JobSettings: &jobs.JobSettings{Name: "job_2"}},
|
||||
},
|
||||
Pipelines: map[string]*resources.Pipeline{
|
||||
"pipeline_1": {PipelineSpec: &pipelines.PipelineSpec{}},
|
||||
"pipeline_2": {PipelineSpec: &pipelines.PipelineSpec{}},
|
||||
},
|
||||
Models: map[string]*resources.MlflowModel{
|
||||
"model_1": {Model: &ml.Model{}},
|
||||
"model_2": {Model: &ml.Model{}},
|
||||
},
|
||||
Experiments: map[string]*resources.MlflowExperiment{
|
||||
"experiment_1": {Experiment: &ml.Experiment{}},
|
||||
"experiment_2": {Experiment: &ml.Experiment{}},
|
||||
},
|
||||
ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{
|
||||
"endpoint_1": {CreateServingEndpoint: &serving.CreateServingEndpoint{}},
|
||||
"endpoint_2": {CreateServingEndpoint: &serving.CreateServingEndpoint{}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
m := mocks.NewMockWorkspaceClient(t)
|
||||
b.SetWorkpaceClient(m.WorkspaceClient)
|
||||
workspaceApi := m.GetMockWorkspaceAPI()
|
||||
workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Some/Root/Path").Return(&workspace.ObjectInfo{
|
||||
ObjectId: 1,
|
||||
}, nil)
|
||||
workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Users/foo@bar.com/artifacts").Return(&workspace.ObjectInfo{
|
||||
ObjectId: 2,
|
||||
}, nil)
|
||||
workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Users/foo@bar.com/files").Return(&workspace.ObjectInfo{
|
||||
ObjectId: 3,
|
||||
}, nil)
|
||||
workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Users/foo@bar.com/state").Return(&workspace.ObjectInfo{
|
||||
ObjectId: 4,
|
||||
}, nil)
|
||||
workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Users/foo@bar.com/resources").Return(&workspace.ObjectInfo{
|
||||
ObjectId: 5,
|
||||
}, nil)
|
||||
|
||||
workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{
|
||||
AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{
|
||||
{UserName: "TestUser", PermissionLevel: "CAN_MANAGE"},
|
||||
{GroupName: "TestGroup", PermissionLevel: "CAN_READ"},
|
||||
{ServicePrincipalName: "TestServicePrincipal", PermissionLevel: "CAN_RUN"},
|
||||
},
|
||||
WorkspaceObjectId: "1",
|
||||
WorkspaceObjectType: "directories",
|
||||
}).Return(nil, nil)
|
||||
|
||||
workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{
|
||||
AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{
|
||||
{UserName: "TestUser", PermissionLevel: "CAN_MANAGE"},
|
||||
{GroupName: "TestGroup", PermissionLevel: "CAN_READ"},
|
||||
{ServicePrincipalName: "TestServicePrincipal", PermissionLevel: "CAN_RUN"},
|
||||
},
|
||||
WorkspaceObjectId: "2",
|
||||
WorkspaceObjectType: "directories",
|
||||
}).Return(nil, nil)
|
||||
|
||||
workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{
|
||||
AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{
|
||||
{UserName: "TestUser", PermissionLevel: "CAN_MANAGE"},
|
||||
{GroupName: "TestGroup", PermissionLevel: "CAN_READ"},
|
||||
{ServicePrincipalName: "TestServicePrincipal", PermissionLevel: "CAN_RUN"},
|
||||
},
|
||||
WorkspaceObjectId: "3",
|
||||
WorkspaceObjectType: "directories",
|
||||
}).Return(nil, nil)
|
||||
|
||||
workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{
|
||||
AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{
|
||||
{UserName: "TestUser", PermissionLevel: "CAN_MANAGE"},
|
||||
{GroupName: "TestGroup", PermissionLevel: "CAN_READ"},
|
||||
{ServicePrincipalName: "TestServicePrincipal", PermissionLevel: "CAN_RUN"},
|
||||
},
|
||||
WorkspaceObjectId: "4",
|
||||
WorkspaceObjectType: "directories",
|
||||
}).Return(nil, nil)
|
||||
|
||||
workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{
|
||||
AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{
|
||||
{UserName: "TestUser", PermissionLevel: "CAN_MANAGE"},
|
||||
{GroupName: "TestGroup", PermissionLevel: "CAN_READ"},
|
||||
{ServicePrincipalName: "TestServicePrincipal", PermissionLevel: "CAN_RUN"},
|
||||
},
|
||||
WorkspaceObjectId: "5",
|
||||
WorkspaceObjectType: "directories",
|
||||
}).Return(nil, nil)
|
||||
|
||||
diags := bundle.Apply(context.Background(), b, ApplyWorkspaceRootPermissions())
|
||||
require.NoError(t, diags.Error())
|
||||
}
|
||||
|
|
|
@ -152,6 +152,7 @@ func Deploy(outputHandler sync.OutputHandler) bundle.Mutator {
|
|||
bundle.Defer(
|
||||
bundle.Seq(
|
||||
terraform.StatePull(),
|
||||
terraform.CheckDashboardsModifiedRemotely(),
|
||||
deploy.StatePull(),
|
||||
mutator.ValidateGitDetails(),
|
||||
artifacts.CleanUp(),
|
||||
|
|
|
@ -66,6 +66,7 @@ func Initialize() bundle.Mutator {
|
|||
permissions.PermissionDiagnostics(),
|
||||
mutator.SetRunAs(),
|
||||
mutator.OverrideCompute(),
|
||||
mutator.ConfigureDashboardDefaults(),
|
||||
mutator.ProcessTargetMode(),
|
||||
mutator.ApplyPresets(),
|
||||
mutator.DefaultQueueing(),
|
||||
|
|
|
@ -0,0 +1,17 @@
package resources

import "github.com/databricks/cli/bundle"

// Completions returns the same as [References] except
// that every key maps directly to a single reference.
func Completions(b *bundle.Bundle, filters ...Filter) map[string]Reference {
	out := make(map[string]Reference)
	keyOnlyRefs, _ := References(b, filters...)
	for k, refs := range keyOnlyRefs {
		if len(refs) != 1 {
			continue
		}
		out[k] = refs[0]
	}
	return out
}
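A hedged sketch of a caller that turns Completions into shell-completion entries; the helper itself is hypothetical, only the bundle and bundle/resources packages come from this change:

package example

import (
	"fmt"

	"github.com/databricks/cli/bundle"
	refs "github.com/databricks/cli/bundle/resources"
)

// completeResourceKeys is a hypothetical helper that produces one
// completion entry per unambiguous resource key.
func completeResourceKeys(b *bundle.Bundle) []string {
	out := []string{}
	for key, ref := range refs.Completions(b) {
		// Pair the short key with its typed form, e.g. "my_job\tjobs.my_job".
		out = append(out, fmt.Sprintf("%s\t%s", key, ref.KeyWithType))
	}
	return out
}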
@ -0,0 +1,58 @@
|
|||
package resources
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestCompletions_SkipDuplicates(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"foo": {},
|
||||
"bar": {},
|
||||
},
|
||||
Pipelines: map[string]*resources.Pipeline{
|
||||
"foo": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Test that this skips duplicates and only includes unambiguous completions.
|
||||
out := Completions(b)
|
||||
if assert.Len(t, out, 1) {
|
||||
assert.Contains(t, out, "bar")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompletions_Filter(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"foo": {},
|
||||
},
|
||||
Pipelines: map[string]*resources.Pipeline{
|
||||
"bar": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
includeJobs := func(ref Reference) bool {
|
||||
_, ok := ref.Resource.(*resources.Job)
|
||||
return ok
|
||||
}
|
||||
|
||||
// Test that this does not include the pipeline.
|
||||
out := Completions(b, includeJobs)
|
||||
if assert.Len(t, out, 1) {
|
||||
assert.Contains(t, out, "foo")
|
||||
}
|
||||
}
|
|
@ -0,0 +1,98 @@
|
|||
package resources
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
)
|
||||
|
||||
// Reference is a reference to a resource.
|
||||
// It includes the resource type description, and a reference to the resource itself.
|
||||
type Reference struct {
|
||||
// Key is the unique key of the resource, e.g. "my_job".
|
||||
Key string
|
||||
|
||||
// KeyWithType is the unique key of the resource, including the resource type, e.g. "jobs.my_job".
|
||||
KeyWithType string
|
||||
|
||||
// Description is the resource type description.
|
||||
Description config.ResourceDescription
|
||||
|
||||
// Resource is the resource itself.
|
||||
Resource config.ConfigResource
|
||||
}
|
||||
|
||||
// Map is the core type for resource lookup and completion.
|
||||
type Map map[string][]Reference
|
||||
|
||||
// Filter defines the function signature for filtering resources.
|
||||
type Filter func(Reference) bool
|
||||
|
||||
// includeReference checks if the specified reference passes all filters.
|
||||
// If the list of filters is empty, the reference is always included.
|
||||
func includeReference(filters []Filter, ref Reference) bool {
|
||||
for _, filter := range filters {
|
||||
if !filter(ref) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// References returns maps of resource keys to a slice of [Reference].
|
||||
//
|
||||
// The first map is indexed by the resource key only.
|
||||
// The second map is indexed by the resource type name and its key.
|
||||
//
|
||||
// While the return type allows for multiple resources to share the same key,
|
||||
// this is confirmed not to happen in the [validate.UniqueResourceKeys] mutator.
|
||||
func References(b *bundle.Bundle, filters ...Filter) (Map, Map) {
|
||||
keyOnly := make(Map)
|
||||
keyWithType := make(Map)
|
||||
|
||||
// Collect map of resource references indexed by their keys.
|
||||
for _, group := range b.Config.Resources.AllResources() {
|
||||
for k, v := range group.Resources {
|
||||
ref := Reference{
|
||||
Key: k,
|
||||
KeyWithType: fmt.Sprintf("%s.%s", group.Description.PluralName, k),
|
||||
Description: group.Description,
|
||||
Resource: v,
|
||||
}
|
||||
|
||||
// Skip resources that do not pass all filters.
|
||||
if !includeReference(filters, ref) {
|
||||
continue
|
||||
}
|
||||
|
||||
keyOnly[ref.Key] = append(keyOnly[ref.Key], ref)
|
||||
keyWithType[ref.KeyWithType] = append(keyWithType[ref.KeyWithType], ref)
|
||||
}
|
||||
}
|
||||
|
||||
return keyOnly, keyWithType
|
||||
}
|
||||
|
||||
// Lookup returns the resource with the specified key.
|
||||
// If the key maps to more than one resource, an error is returned.
|
||||
// If the key does not map to any resource, an error is returned.
|
||||
func Lookup(b *bundle.Bundle, key string, filters ...Filter) (Reference, error) {
|
||||
keyOnlyRefs, keyWithTypeRefs := References(b, filters...)
|
||||
refs, ok := keyOnlyRefs[key]
|
||||
if !ok {
|
||||
refs, ok = keyWithTypeRefs[key]
|
||||
if !ok {
|
||||
return Reference{}, fmt.Errorf("resource with key %q not found", key)
|
||||
}
|
||||
}
|
||||
|
||||
switch {
|
||||
case len(refs) == 1:
|
||||
return refs[0], nil
|
||||
case len(refs) > 1:
|
||||
return Reference{}, fmt.Errorf("multiple resources with key %q found", key)
|
||||
default:
|
||||
panic("unreachable")
|
||||
}
|
||||
}
|
|
@ -0,0 +1,117 @@
|
|||
package resources
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/databricks/databricks-sdk-go/service/jobs"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestLookup_EmptyBundle(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := Lookup(b, "foo")
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, "resource with key \"foo\" not found")
|
||||
}
|
||||
|
||||
func TestLookup_NotFound(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"foo": {},
|
||||
"bar": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := Lookup(b, "qux")
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, `resource with key "qux" not found`)
|
||||
}
|
||||
|
||||
func TestLookup_MultipleFound(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"foo": {},
|
||||
},
|
||||
Pipelines: map[string]*resources.Pipeline{
|
||||
"foo": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := Lookup(b, "foo")
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, `multiple resources with key "foo" found`)
|
||||
}
|
||||
|
||||
func TestLookup_Nominal(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"foo": {
|
||||
JobSettings: &jobs.JobSettings{
|
||||
Name: "Foo job",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Lookup by key only.
|
||||
out, err := Lookup(b, "foo")
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, "Foo job", out.Resource.GetName())
|
||||
}
|
||||
|
||||
// Lookup by type and key.
|
||||
out, err = Lookup(b, "jobs.foo")
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, "Foo job", out.Resource.GetName())
|
||||
}
|
||||
}
|
||||
|
||||
func TestLookup_NominalWithFilters(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"foo": {},
|
||||
},
|
||||
Pipelines: map[string]*resources.Pipeline{
|
||||
"bar": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
includeJobs := func(ref Reference) bool {
|
||||
_, ok := ref.Resource.(*resources.Job)
|
||||
return ok
|
||||
}
|
||||
|
||||
// This should succeed because the filter includes jobs.
|
||||
_, err := Lookup(b, "foo", includeJobs)
|
||||
require.NoError(t, err)
|
||||
|
||||
// This should fail because the filter excludes pipelines.
|
||||
_, err = Lookup(b, "bar", includeJobs)
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, `resource with key "bar" not found`)
|
||||
}
|
|
@ -317,6 +317,29 @@ func (r *jobRunner) Cancel(ctx context.Context) error {
	return errGroup.Wait()
}

func (r *jobRunner) Restart(ctx context.Context, opts *Options) (output.RunOutput, error) {
	// We don't need to cancel existing runs if the job is continuous and unpaused.
	// The /jobs/run-now API will automatically cancel any existing runs before starting a new one.
	//
	// /jobs/run-now will not cancel existing runs if the job is continuous and paused.
	// New job runs will be queued instead and will wait for existing runs to finish.
	// In this case, we need to cancel the existing runs before starting a new one.
	continuous := r.job.JobSettings.Continuous
	if continuous != nil && continuous.PauseStatus == jobs.PauseStatusUnpaused {
		return r.Run(ctx, opts)
	}

	s := cmdio.Spinner(ctx)
	s <- "Cancelling all active job runs"
	err := r.Cancel(ctx)
	close(s)
	if err != nil {
		return nil, err
	}

	return r.Run(ctx, opts)
}

func (r *jobRunner) ParseArgs(args []string, opts *Options) error {
	return r.posArgsHandler().ParseArgs(args, opts)
}

|
@ -1,6 +1,7 @@
|
|||
package run
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -8,6 +9,8 @@ import (
|
|||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/databricks/cli/libs/cmdio"
|
||||
"github.com/databricks/cli/libs/flags"
|
||||
"github.com/databricks/databricks-sdk-go/experimental/mocks"
|
||||
"github.com/databricks/databricks-sdk-go/service/jobs"
|
||||
"github.com/stretchr/testify/mock"
|
||||
|
@ -126,3 +129,132 @@ func TestJobRunnerCancelWithNoActiveRuns(t *testing.T) {
|
|||
err := runner.Cancel(context.Background())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestJobRunnerRestart(t *testing.T) {
|
||||
for _, jobSettings := range []*jobs.JobSettings{
|
||||
{},
|
||||
{
|
||||
Continuous: &jobs.Continuous{
|
||||
PauseStatus: jobs.PauseStatusPaused,
|
||||
},
|
||||
},
|
||||
} {
|
||||
job := &resources.Job{
|
||||
ID: "123",
|
||||
JobSettings: jobSettings,
|
||||
}
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"test_job": job,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
runner := jobRunner{key: "test", bundle: b, job: job}
|
||||
|
||||
m := mocks.NewMockWorkspaceClient(t)
|
||||
b.SetWorkpaceClient(m.WorkspaceClient)
|
||||
ctx := context.Background()
|
||||
ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", ""))
|
||||
ctx = cmdio.NewContext(ctx, cmdio.NewLogger(flags.ModeAppend))
|
||||
|
||||
jobApi := m.GetMockJobsAPI()
|
||||
jobApi.EXPECT().ListRunsAll(mock.Anything, jobs.ListRunsRequest{
|
||||
ActiveOnly: true,
|
||||
JobId: 123,
|
||||
}).Return([]jobs.BaseRun{
|
||||
{RunId: 1},
|
||||
{RunId: 2},
|
||||
}, nil)
|
||||
|
||||
// Mock the runner cancelling existing job runs.
|
||||
mockWait := &jobs.WaitGetRunJobTerminatedOrSkipped[struct{}]{
|
||||
Poll: func(time time.Duration, f func(j *jobs.Run)) (*jobs.Run, error) {
|
||||
return nil, nil
|
||||
},
|
||||
}
|
||||
jobApi.EXPECT().CancelRun(mock.Anything, jobs.CancelRun{
|
||||
RunId: 1,
|
||||
}).Return(mockWait, nil)
|
||||
jobApi.EXPECT().CancelRun(mock.Anything, jobs.CancelRun{
|
||||
RunId: 2,
|
||||
}).Return(mockWait, nil)
|
||||
|
||||
// Mock the runner triggering a job run
|
||||
mockWaitForRun := &jobs.WaitGetRunJobTerminatedOrSkipped[jobs.RunNowResponse]{
|
||||
Poll: func(d time.Duration, f func(*jobs.Run)) (*jobs.Run, error) {
|
||||
return &jobs.Run{
|
||||
State: &jobs.RunState{
|
||||
ResultState: jobs.RunResultStateSuccess,
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
}
|
||||
jobApi.EXPECT().RunNow(mock.Anything, jobs.RunNow{
|
||||
JobId: 123,
|
||||
}).Return(mockWaitForRun, nil)
|
||||
|
||||
// Mock the runner getting the job output
|
||||
jobApi.EXPECT().GetRun(mock.Anything, jobs.GetRunRequest{}).Return(&jobs.Run{}, nil)
|
||||
|
||||
_, err := runner.Restart(ctx, &Options{})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJobRunnerRestartForContinuousUnpausedJobs(t *testing.T) {
|
||||
job := &resources.Job{
|
||||
ID: "123",
|
||||
JobSettings: &jobs.JobSettings{
|
||||
Continuous: &jobs.Continuous{
|
||||
PauseStatus: jobs.PauseStatusUnpaused,
|
||||
},
|
||||
},
|
||||
}
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"test_job": job,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
runner := jobRunner{key: "test", bundle: b, job: job}
|
||||
|
||||
m := mocks.NewMockWorkspaceClient(t)
|
||||
b.SetWorkpaceClient(m.WorkspaceClient)
|
||||
ctx := context.Background()
|
||||
ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "..."))
|
||||
ctx = cmdio.NewContext(ctx, cmdio.NewLogger(flags.ModeAppend))
|
||||
|
||||
jobApi := m.GetMockJobsAPI()
|
||||
|
||||
// The runner should not try and cancel existing job runs for unpaused continuous jobs.
|
||||
jobApi.AssertNotCalled(t, "ListRunsAll")
|
||||
jobApi.AssertNotCalled(t, "CancelRun")
|
||||
|
||||
// Mock the runner triggering a job run
|
||||
mockWaitForRun := &jobs.WaitGetRunJobTerminatedOrSkipped[jobs.RunNowResponse]{
|
||||
Poll: func(d time.Duration, f func(*jobs.Run)) (*jobs.Run, error) {
|
||||
return &jobs.Run{
|
||||
State: &jobs.RunState{
|
||||
ResultState: jobs.RunResultStateSuccess,
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
}
|
||||
jobApi.EXPECT().RunNow(mock.Anything, jobs.RunNow{
|
||||
JobId: 123,
|
||||
}).Return(mockWaitForRun, nil)
|
||||
|
||||
// Mock the runner getting the job output
|
||||
jobApi.EXPECT().GetRun(mock.Anything, jobs.GetRunRequest{}).Return(&jobs.Run{}, nil)
|
||||
|
||||
_, err := runner.Restart(ctx, &Options{})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
|
|
@ -1,69 +0,0 @@
|
|||
package run
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
// RunnerLookup maps identifiers to a list of workloads that match that identifier.
|
||||
// The list can have more than 1 entry if resources of different types use the
|
||||
// same key. When this happens, the user should disambiguate between them.
|
||||
type RunnerLookup map[string][]Runner
|
||||
|
||||
// ResourceKeys computes a map with
|
||||
func ResourceKeys(b *bundle.Bundle) (keyOnly RunnerLookup, keyWithType RunnerLookup) {
|
||||
keyOnly = make(RunnerLookup)
|
||||
keyWithType = make(RunnerLookup)
|
||||
|
||||
r := b.Config.Resources
|
||||
for k, v := range r.Jobs {
|
||||
kt := fmt.Sprintf("jobs.%s", k)
|
||||
w := jobRunner{key: key(kt), bundle: b, job: v}
|
||||
keyOnly[k] = append(keyOnly[k], &w)
|
||||
keyWithType[kt] = append(keyWithType[kt], &w)
|
||||
}
|
||||
for k, v := range r.Pipelines {
|
||||
kt := fmt.Sprintf("pipelines.%s", k)
|
||||
w := pipelineRunner{key: key(kt), bundle: b, pipeline: v}
|
||||
keyOnly[k] = append(keyOnly[k], &w)
|
||||
keyWithType[kt] = append(keyWithType[kt], &w)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ResourceCompletionMap returns a map of resource keys to their respective names.
|
||||
func ResourceCompletionMap(b *bundle.Bundle) map[string]string {
|
||||
out := make(map[string]string)
|
||||
keyOnly, keyWithType := ResourceKeys(b)
|
||||
|
||||
// Keep track of resources we have seen by their fully qualified key.
|
||||
seen := make(map[string]bool)
|
||||
|
||||
// First add resources that can be identified by key alone.
|
||||
for k, v := range keyOnly {
|
||||
// Invariant: len(v) >= 1. See [ResourceKeys].
|
||||
if len(v) == 1 {
|
||||
seen[v[0].Key()] = true
|
||||
out[k] = v[0].Name()
|
||||
}
|
||||
}
|
||||
|
||||
// Then add resources that can only be identified by their type and key.
|
||||
for k, v := range keyWithType {
|
||||
// Invariant: len(v) == 1. See [ResourceKeys].
|
||||
_, ok := seen[v[0].Key()]
|
||||
if ok {
|
||||
continue
|
||||
}
|
||||
out[k] = v[0].Name()
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// ResourceCompletions returns a list of keys that unambiguously reference resources in the bundle.
|
||||
func ResourceCompletions(b *bundle.Bundle) []string {
|
||||
return maps.Keys(ResourceCompletionMap(b))
|
||||
}
|
|
@ -1,25 +0,0 @@
|
|||
package run
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestResourceCompletionsUnique(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"foo": {},
|
||||
"bar": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
assert.ElementsMatch(t, []string{"foo", "bar"}, ResourceCompletions(b))
|
||||
}
|
|
@ -178,6 +178,18 @@ func (r *pipelineRunner) Cancel(ctx context.Context) error {
	return err
}

func (r *pipelineRunner) Restart(ctx context.Context, opts *Options) (output.RunOutput, error) {
	s := cmdio.Spinner(ctx)
	s <- "Cancelling the active pipeline update"
	err := r.Cancel(ctx)
	close(s)
	if err != nil {
		return nil, err
	}

	return r.Run(ctx, opts)
}

func (r *pipelineRunner) ParseArgs(args []string, opts *Options) error {
	if len(args) == 0 {
		return nil

|
@ -1,6 +1,7 @@
|
|||
package run
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -8,8 +9,12 @@ import (
|
|||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/databricks/cli/libs/cmdio"
|
||||
"github.com/databricks/cli/libs/flags"
|
||||
sdk_config "github.com/databricks/databricks-sdk-go/config"
|
||||
"github.com/databricks/databricks-sdk-go/experimental/mocks"
|
||||
"github.com/databricks/databricks-sdk-go/service/pipelines"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
|
@ -47,3 +52,68 @@ func TestPipelineRunnerCancel(t *testing.T) {
|
|||
err := runner.Cancel(context.Background())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestPipelineRunnerRestart(t *testing.T) {
|
||||
pipeline := &resources.Pipeline{
|
||||
ID: "123",
|
||||
}
|
||||
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Pipelines: map[string]*resources.Pipeline{
|
||||
"test_pipeline": pipeline,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
runner := pipelineRunner{key: "test", bundle: b, pipeline: pipeline}
|
||||
|
||||
m := mocks.NewMockWorkspaceClient(t)
|
||||
m.WorkspaceClient.Config = &sdk_config.Config{
|
||||
Host: "https://test.com",
|
||||
}
|
||||
b.SetWorkpaceClient(m.WorkspaceClient)
|
||||
ctx := context.Background()
|
||||
ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "..."))
|
||||
ctx = cmdio.NewContext(ctx, cmdio.NewLogger(flags.ModeAppend))
|
||||
|
||||
mockWait := &pipelines.WaitGetPipelineIdle[struct{}]{
|
||||
Poll: func(time.Duration, func(*pipelines.GetPipelineResponse)) (*pipelines.GetPipelineResponse, error) {
|
||||
return nil, nil
|
||||
},
|
||||
}
|
||||
|
||||
pipelineApi := m.GetMockPipelinesAPI()
|
||||
pipelineApi.EXPECT().Stop(mock.Anything, pipelines.StopRequest{
|
||||
PipelineId: "123",
|
||||
}).Return(mockWait, nil)
|
||||
|
||||
pipelineApi.EXPECT().GetByPipelineId(mock.Anything, "123").Return(&pipelines.GetPipelineResponse{}, nil)
|
||||
|
||||
// Mock runner starting a new update
|
||||
pipelineApi.EXPECT().StartUpdate(mock.Anything, pipelines.StartUpdate{
|
||||
PipelineId: "123",
|
||||
}).Return(&pipelines.StartUpdateResponse{
|
||||
UpdateId: "456",
|
||||
}, nil)
|
||||
|
||||
// Mock runner polling for events
|
||||
pipelineApi.EXPECT().ListPipelineEventsAll(mock.Anything, pipelines.ListPipelineEventsRequest{
|
||||
Filter: `update_id = '456'`,
|
||||
MaxResults: 100,
|
||||
PipelineId: "123",
|
||||
}).Return([]pipelines.PipelineEvent{}, nil)
|
||||
|
||||
// Mock runner polling for update status
|
||||
pipelineApi.EXPECT().GetUpdateByPipelineIdAndUpdateId(mock.Anything, "123", "456").
|
||||
Return(&pipelines.GetUpdateResponse{
|
||||
Update: &pipelines.UpdateInfo{
|
||||
State: pipelines.UpdateInfoStateCompleted,
|
||||
},
|
||||
}, nil)
|
||||
|
||||
_, err := runner.Restart(ctx, &Options{})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
|
|
@ -3,9 +3,10 @@ package run
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
refs "github.com/databricks/cli/bundle/resources"
|
||||
"github.com/databricks/cli/bundle/run/output"
|
||||
)
|
||||
|
||||
|
@ -27,6 +28,10 @@ type Runner interface {
|
|||
// Run the underlying workflow.
|
||||
Run(ctx context.Context, opts *Options) (output.RunOutput, error)
|
||||
|
||||
// Restart the underlying workflow by cancelling any existing runs before
|
||||
// starting a new one.
|
||||
Restart(ctx context.Context, opts *Options) (output.RunOutput, error)
|
||||
|
||||
// Cancel the underlying workflow.
|
||||
Cancel(ctx context.Context) error
|
||||
|
||||
|
@ -34,34 +39,24 @@ type Runner interface {
|
|||
argsHandler
|
||||
}
|
||||
|
||||
// Find locates a runner matching the specified argument.
|
||||
//
|
||||
// Its behavior is as follows:
|
||||
// 1. Try to find a resource with <key> identical to the argument.
|
||||
// 2. Try to find a resource with <type>.<key> identical to the argument.
|
||||
//
|
||||
// If an argument resolves to multiple resources, it returns an error.
|
||||
func Find(b *bundle.Bundle, arg string) (Runner, error) {
|
||||
keyOnly, keyWithType := ResourceKeys(b)
|
||||
if len(keyWithType) == 0 {
|
||||
return nil, fmt.Errorf("bundle defines no resources")
|
||||
// IsRunnable returns a filter that only allows runnable resources.
|
||||
func IsRunnable(ref refs.Reference) bool {
|
||||
switch ref.Resource.(type) {
|
||||
case *resources.Job, *resources.Pipeline:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// ToRunner converts a resource reference to a runnable resource.
|
||||
func ToRunner(b *bundle.Bundle, ref refs.Reference) (Runner, error) {
|
||||
switch resource := ref.Resource.(type) {
|
||||
case *resources.Job:
|
||||
return &jobRunner{key: key(ref.KeyWithType), bundle: b, job: resource}, nil
|
||||
case *resources.Pipeline:
|
||||
return &pipelineRunner{key: key(ref.KeyWithType), bundle: b, pipeline: resource}, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported resource type: %T", resource)
|
||||
}
|
||||
|
||||
runners, ok := keyOnly[arg]
|
||||
if !ok {
|
||||
runners, ok = keyWithType[arg]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("no such resource: %s", arg)
|
||||
}
|
||||
}
|
||||
|
||||
if len(runners) != 1 {
|
||||
var keys []string
|
||||
for _, runner := range runners {
|
||||
keys = append(keys, runner.Key())
|
||||
}
|
||||
return nil, fmt.Errorf("ambiguous: %s (can resolve to all of %s)", arg, strings.Join(keys, ", "))
|
||||
}
|
||||
|
||||
return runners[0], nil
|
||||
}
|
||||
|
|
|
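Taken together with the bundle/resources package introduced earlier in this change, a hedged fragment showing how a caller resolves and restarts a resource; the key, error handling, and variable names are illustrative, and the call sits outside the run package (hence the package qualifiers):

// Resolve a runnable resource by key (or by "type.key"), then restart it.
ref, err := refs.Lookup(b, "my_job", run.IsRunnable)
if err != nil {
	return err
}

runner, err := run.ToRunner(b, ref)
if err != nil {
	return err
}

// Restart cancels any active runs (unless the job is continuous and
// unpaused) and then triggers a new run.
out, err := runner.Restart(ctx, &run.Options{})
if err != nil {
	return err
}
_ = out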
@ -3,82 +3,14 @@ package run
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
refs "github.com/databricks/cli/bundle/resources"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestFindNoResources(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := Find(b, "foo")
|
||||
assert.ErrorContains(t, err, "bundle defines no resources")
|
||||
}
|
||||
|
||||
func TestFindSingleArg(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"foo": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := Find(b, "foo")
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestFindSingleArgNotFound(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"foo": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := Find(b, "bar")
|
||||
assert.ErrorContains(t, err, "no such resource: bar")
|
||||
}
|
||||
|
||||
func TestFindSingleArgAmbiguous(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"key": {},
|
||||
},
|
||||
Pipelines: map[string]*resources.Pipeline{
|
||||
"key": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := Find(b, "key")
|
||||
assert.ErrorContains(t, err, "ambiguous: ")
|
||||
}
|
||||
|
||||
func TestFindSingleArgWithType(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"key": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := Find(b, "jobs.key")
|
||||
assert.NoError(t, err)
|
||||
func TestRunner_IsRunnable(t *testing.T) {
|
||||
assert.True(t, IsRunnable(refs.Reference{Resource: &resources.Job{}}))
|
||||
assert.True(t, IsRunnable(refs.Reference{Resource: &resources.Pipeline{}}))
|
||||
assert.False(t, IsRunnable(refs.Reference{Resource: &resources.MlflowModel{}}))
|
||||
assert.False(t, IsRunnable(refs.Reference{Resource: &resources.MlflowExperiment{}}))
|
||||
}
|
||||
|
|
|
@ -59,9 +59,14 @@ func TestJsonSchema(t *testing.T) {
|
|||
}
|
||||
|
||||
// Assert enum values are loaded
|
||||
schedule := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "catalog.MonitorCronSchedule")
|
||||
assert.Contains(t, schedule.AnyOf[0].Properties["pause_status"].Enum, "PAUSED")
|
||||
assert.Contains(t, schedule.AnyOf[0].Properties["pause_status"].Enum, "UNPAUSED")
|
||||
schedule := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "pipelines.RestartWindow")
|
||||
assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "MONDAY")
|
||||
assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "TUESDAY")
|
||||
assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "WEDNESDAY")
|
||||
assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "THURSDAY")
|
||||
assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "FRIDAY")
|
||||
assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "SATURDAY")
|
||||
assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "SUNDAY")
|
||||
|
||||
providers := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "jobs.GitProvider")
|
||||
assert.Contains(t, providers.Enum, "gitHub")
|
||||
|
|
|
@ -180,6 +180,69 @@
|
|||
}
|
||||
]
|
||||
},
|
||||
"resources.Dashboard": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"create_time": {
|
||||
"description": "The timestamp of when the dashboard was created.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"dashboard_id": {
|
||||
"description": "UUID identifying the dashboard.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"display_name": {
|
||||
"description": "The display name of the dashboard.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"embed_credentials": {
|
||||
"$ref": "#/$defs/bool"
|
||||
},
|
||||
"etag": {
|
||||
"description": "The etag for the dashboard. Can be optionally provided on updates to ensure that the dashboard\nhas not been modified since the last read.\nThis field is excluded in List Dashboards responses.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"file_path": {
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"lifecycle_state": {
|
||||
"description": "The state of the dashboard resource. Used for tracking trashed status.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/dashboards.LifecycleState"
|
||||
},
|
||||
"parent_path": {
|
||||
"description": "The workspace path of the folder containing the dashboard. Includes leading slash and no\ntrailing slash.\nThis field is excluded in List Dashboards responses.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"path": {
|
||||
"description": "The workspace path of the dashboard asset, including the file name.\nExported dashboards always have the file extension `.lvdash.json`.\nThis field is excluded in List Dashboards responses.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"permissions": {
|
||||
"$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission"
|
||||
},
|
||||
"serialized_dashboard": {
|
||||
"description": "The contents of the dashboard in serialized string form.\nThis field is excluded in List Dashboards responses.\nUse the [get dashboard API](https://docs.databricks.com/api/workspace/lakeview/get)\nto retrieve an example response, which includes the `serialized_dashboard` field.\nThis field provides the structure of the JSON string that represents the dashboard's\nlayout and components.",
|
||||
"$ref": "#/$defs/interface"
|
||||
},
|
||||
"update_time": {
|
||||
"description": "The timestamp of when the dashboard was last updated by the user.\nThis field is excluded in List Dashboards responses.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"warehouse_id": {
|
||||
"description": "The warehouse ID used to run the dashboard.",
|
||||
"$ref": "#/$defs/string"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
|
||||
}
|
||||
]
|
||||
},
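As an editorial aside (not part of the generated schema), the string alternative above admits bundle variable references; a quick sanity check of that pattern with Go's regexp package, using made-up example inputs (assumes "fmt" and "regexp" are imported):

pattern := regexp.MustCompile(`\$\{(var(\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\[[0-9]+\])*)+)\}`)
fmt.Println(pattern.MatchString("${var.warehouse_id}")) // true
fmt.Println(pattern.MatchString("${var.tags[0]}"))      // true
fmt.Println(pattern.MatchString("${workspace.host}"))   // false: only var.* references match this pattern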
"resources.Grant": {
|
||||
"anyOf": [
|
||||
{
|
||||
|
@ -209,6 +272,10 @@
|
|||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"budget_policy_id": {
|
||||
"description": "The id of the user specified budget policy to use for this job.\nIf not specified, a default budget policy may be applied when creating or modifying the job.\nSee `effective_budget_policy_id` for the budget policy used by this workload.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"continuous": {
|
||||
"description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.Continuous"
|
||||
|
@ -505,7 +572,7 @@
|
|||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.Filters"
|
||||
},
|
||||
"gateway_definition": {
|
||||
"description": "The definition of a gateway pipeline to support CDC.",
|
||||
"description": "The definition of a gateway pipeline to support change data capture.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.IngestionGatewayPipelineDefinition"
|
||||
},
|
||||
"id": {
|
||||
|
@ -535,6 +602,10 @@
|
|||
"description": "Whether Photon is enabled for this pipeline.",
|
||||
"$ref": "#/$defs/bool"
|
||||
},
|
||||
"restart_window": {
|
||||
"description": "Restart window of this pipeline.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindow"
|
||||
},
|
||||
"schema": {
|
||||
"description": "The default schema (database) where tables are read from or published to. The presence of this field implies that the pipeline is in direct publishing mode.",
|
||||
"$ref": "#/$defs/string"
|
||||
|
@ -613,6 +684,9 @@
|
|||
"description": "Configuration for monitoring snapshot tables.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorSnapshot"
|
||||
},
|
||||
"table_name": {
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"time_series": {
|
||||
"description": "Configuration for monitoring time series tables.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorTimeSeries"
|
||||
|
@ -624,6 +698,7 @@
|
|||
},
|
||||
"additionalProperties": false,
|
||||
"required": [
|
||||
"table_name",
|
||||
"assets_dir",
|
||||
"output_schema_name"
|
||||
]
|
||||
|
@ -1050,6 +1125,9 @@
|
|||
"clusters": {
|
||||
"$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Cluster"
|
||||
},
|
||||
"dashboards": {
|
||||
"$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Dashboard"
|
||||
},
|
||||
"experiments": {
|
||||
"$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.MlflowExperiment"
|
||||
},
|
||||
|
@ -1240,11 +1318,7 @@
|
|||
"properties": {
|
||||
"pause_status": {
|
||||
"description": "Read only field that indicates whether a schedule is paused or not.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedulePauseStatus",
|
||||
"enum": [
|
||||
"UNPAUSED",
|
||||
"PAUSED"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedulePauseStatus"
|
||||
},
|
||||
"quartz_cron_expression": {
|
||||
"description": "The expression that determines when to run the monitor. See [examples](https://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html).\n",
|
||||
|
@ -1268,7 +1342,12 @@
|
|||
]
|
||||
},
|
||||
"catalog.MonitorCronSchedulePauseStatus": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "Read only field that indicates whether a schedule is paused or not.",
|
||||
"enum": [
|
||||
"UNPAUSED",
|
||||
"PAUSED"
|
||||
]
|
||||
},
|
||||
"catalog.MonitorDataClassificationConfig": {
|
||||
"anyOf": [
|
||||
|
@ -1333,11 +1412,7 @@
|
|||
},
|
||||
"problem_type": {
|
||||
"description": "Problem type the model aims to solve. Determines the type of model-quality metrics that will be computed.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorInferenceLogProblemType",
|
||||
"enum": [
|
||||
"PROBLEM_TYPE_CLASSIFICATION",
|
||||
"PROBLEM_TYPE_REGRESSION"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorInferenceLogProblemType"
|
||||
},
|
||||
"timestamp_col": {
|
||||
"description": "Column that contains the timestamps of requests. The column must be one of the following:\n- A ``TimestampType`` column\n- A column whose values can be converted to timestamps through the pyspark\n ``to_timestamp`` [function](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html).\n",
|
||||
|
@ -1360,7 +1435,12 @@
|
|||
]
|
||||
},
|
||||
"catalog.MonitorInferenceLogProblemType": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "Problem type the model aims to solve. Determines the type of model-quality metrics that will be computed.",
|
||||
"enum": [
|
||||
"PROBLEM_TYPE_CLASSIFICATION",
|
||||
"PROBLEM_TYPE_REGRESSION"
|
||||
]
|
||||
},
|
||||
"catalog.MonitorMetric": {
|
||||
"anyOf": [
|
||||
|
@ -1385,12 +1465,7 @@
|
|||
},
|
||||
"type": {
|
||||
"description": "Can only be one of ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"``, ``\"CUSTOM_METRIC_TYPE_DERIVED\"``, or ``\"CUSTOM_METRIC_TYPE_DRIFT\"``.\nThe ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"`` and ``\"CUSTOM_METRIC_TYPE_DERIVED\"`` metrics\nare computed on a single table, whereas the ``\"CUSTOM_METRIC_TYPE_DRIFT\"`` compare metrics across\nbaseline and input table, or across the two consecutive time windows.\n- CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table\n- CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics\n- CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics\n",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorMetricType",
|
||||
"enum": [
|
||||
"CUSTOM_METRIC_TYPE_AGGREGATE",
|
||||
"CUSTOM_METRIC_TYPE_DERIVED",
|
||||
"CUSTOM_METRIC_TYPE_DRIFT"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorMetricType"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
|
@ -1409,7 +1484,13 @@
|
|||
]
|
||||
},
|
||||
"catalog.MonitorMetricType": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "Can only be one of ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"``, ``\"CUSTOM_METRIC_TYPE_DERIVED\"``, or ``\"CUSTOM_METRIC_TYPE_DRIFT\"``.\nThe ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"`` and ``\"CUSTOM_METRIC_TYPE_DERIVED\"`` metrics\nare computed on a single table, whereas the ``\"CUSTOM_METRIC_TYPE_DRIFT\"`` compare metrics across\nbaseline and input table, or across the two consecutive time windows.\n- CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table\n- CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics\n- CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics\n",
|
||||
"enum": [
|
||||
"CUSTOM_METRIC_TYPE_AGGREGATE",
|
||||
"CUSTOM_METRIC_TYPE_DERIVED",
|
||||
"CUSTOM_METRIC_TYPE_DRIFT"
|
||||
]
|
||||
},
|
||||
"catalog.MonitorNotifications": {
|
||||
"anyOf": [
|
||||
|
@ -2276,6 +2357,13 @@
|
|||
}
|
||||
]
|
||||
},
|
||||
"dashboards.LifecycleState": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"ACTIVE",
|
||||
"TRASHED"
|
||||
]
|
||||
},
|
||||
"jobs.Condition": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
|
@ -3053,7 +3141,7 @@
|
|||
"$ref": "#/$defs/slice/string"
|
||||
},
|
||||
"jar_params": {
|
||||
"description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\nUse [Task parameter variables](/jobs.html\\\"#parameter-variables\\\") to set parameters containing information about job runs.",
|
||||
"description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.",
|
||||
"$ref": "#/$defs/slice/string"
|
||||
},
|
||||
"job_id": {
|
||||
|
@ -3387,11 +3475,11 @@
|
|||
"type": "object",
|
||||
"properties": {
|
||||
"condition_task": {
|
||||
"description": "If condition_task, specifies a condition with an outcome that can be used to control the execution of other tasks. Does not require a cluster to execute and does not support retries or notifications.",
|
||||
"description": "The task evaluates a condition that can be used to control the execution of other tasks when the `condition_task` field is present.\nThe condition task does not require a cluster to execute and does not support retries or notifications.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.ConditionTask"
|
||||
},
|
||||
"dbt_task": {
|
||||
"description": "If dbt_task, indicates that this must execute a dbt task. It requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.",
|
||||
"description": "The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.DbtTask"
|
||||
},
|
||||
"depends_on": {
|
||||
|
@ -3419,7 +3507,7 @@
|
|||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"for_each_task": {
|
||||
"description": "If for_each_task, indicates that this task must execute the nested task within it.",
|
||||
"description": "The task executes a nested task for every input provided when the `for_each_task` field is present.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.ForEachTask"
|
||||
},
|
||||
"health": {
|
||||
|
@ -3446,7 +3534,7 @@
|
|||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec"
|
||||
},
|
||||
"notebook_task": {
|
||||
"description": "If notebook_task, indicates that this task must run a notebook. This field may not be specified in conjunction with spark_jar_task.",
|
||||
"description": "The task runs a notebook when the `notebook_task` field is present.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.NotebookTask"
|
||||
},
|
||||
"notification_settings": {
|
||||
|
@ -3454,11 +3542,11 @@
|
|||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.TaskNotificationSettings"
|
||||
},
|
||||
"pipeline_task": {
|
||||
"description": "If pipeline_task, indicates that this task must execute a Pipeline.",
|
||||
"description": "The task triggers a pipeline update when the `pipeline_task` field is present. Only pipelines configured to use triggered more are supported.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.PipelineTask"
|
||||
},
|
||||
"python_wheel_task": {
|
||||
"description": "If python_wheel_task, indicates that this job must execute a PythonWheel.",
|
||||
"description": "The task runs a Python wheel when the `python_wheel_task` field is present.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.PythonWheelTask"
|
||||
},
|
||||
"retry_on_timeout": {
|
||||
|
@ -3470,23 +3558,23 @@
|
|||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.RunIf"
|
||||
},
|
||||
"run_job_task": {
|
||||
"description": "If run_job_task, indicates that this task must execute another job.",
|
||||
"description": "The task triggers another job when the `run_job_task` field is present.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask"
|
||||
},
|
||||
"spark_jar_task": {
|
||||
"description": "If spark_jar_task, indicates that this task must run a JAR.",
|
||||
"description": "The task runs a JAR when the `spark_jar_task` field is present.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkJarTask"
|
||||
},
|
||||
"spark_python_task": {
|
||||
"description": "If spark_python_task, indicates that this task must run a Python file.",
|
||||
"description": "The task runs a Python file when the `spark_python_task` field is present.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkPythonTask"
|
||||
},
|
||||
"spark_submit_task": {
|
||||
"description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations.\n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.",
|
||||
"description": "(Legacy) The task runs the spark-submit script when the `spark_submit_task` field is present. This task can run only on new clusters and is not compatible with serverless compute.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations.\n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkSubmitTask"
|
||||
},
|
||||
"sql_task": {
|
||||
"description": "If sql_task, indicates that this job must execute a SQL task.",
|
||||
"description": "The task runs a SQL query or file, or it refreshes a SQL alert or a legacy SQL dashboard when the `sql_task` field is present.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SqlTask"
|
||||
},
|
||||
"task_key": {
|
||||
|
@ -3772,12 +3860,7 @@
|
|||
},
|
||||
"status": {
|
||||
"description": "Current status of `model_version`",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/ml.ModelVersionStatus",
|
||||
"enum": [
|
||||
"PENDING_REGISTRATION",
|
||||
"FAILED_REGISTRATION",
|
||||
"READY"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/ml.ModelVersionStatus"
|
||||
},
|
||||
"status_message": {
|
||||
"description": "Details on current `status`, if it is pending or failed.",
|
||||
|
@ -3805,7 +3888,13 @@
|
|||
]
|
||||
},
|
||||
"ml.ModelVersionStatus": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "Current status of `model_version`",
|
||||
"enum": [
|
||||
"PENDING_REGISTRATION",
|
||||
"FAILED_REGISTRATION",
|
||||
"READY"
|
||||
]
|
||||
},
|
||||
"ml.ModelVersionTag": {
|
||||
"anyOf": [
|
||||
|
@ -3901,12 +3990,16 @@
|
|||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"report": {
|
||||
"description": "Select a specific source report.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.ReportSpec"
|
||||
},
|
||||
"schema": {
|
||||
"description": "Select tables from a specific source schema.",
|
||||
"description": "Select all tables from a specific source schema.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.SchemaSpec"
|
||||
},
|
||||
"table": {
|
||||
"description": "Select tables from a specific source table.",
|
||||
"description": "Select a specific source table.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.TableSpec"
|
||||
}
|
||||
},
|
||||
|
@ -3924,7 +4017,11 @@
|
|||
"type": "object",
|
||||
"properties": {
|
||||
"connection_id": {
|
||||
"description": "Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source.",
|
||||
"description": "[Deprecated, use connection_name instead] Immutable. The Unity Catalog connection that this gateway pipeline uses to communicate with the source.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"connection_name": {
|
||||
"description": "Immutable. The Unity Catalog connection that this gateway pipeline uses to communicate with the source.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"gateway_storage_catalog": {
|
||||
|
@ -3954,11 +4051,11 @@
|
|||
"type": "object",
|
||||
"properties": {
|
||||
"connection_name": {
|
||||
"description": "Immutable. The Unity Catalog connection this ingestion pipeline uses to communicate with the source. Specify either ingestion_gateway_id or connection_name.",
|
||||
"description": "Immutable. The Unity Catalog connection that this ingestion pipeline uses to communicate with the source. This is used with connectors for applications like Salesforce, Workday, and so on.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"ingestion_gateway_id": {
|
||||
"description": "Immutable. Identifier for the ingestion gateway used by this ingestion pipeline to communicate with the source. Specify either ingestion_gateway_id or connection_name.",
|
||||
"description": "Immutable. Identifier for the gateway that is used by this ingestion pipeline to communicate with the source database. This is used with connectors to databases like SQL Server.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"objects": {
|
||||
|
@ -4135,11 +4232,7 @@
|
|||
},
|
||||
"mode": {
|
||||
"description": "Databricks Enhanced Autoscaling optimizes cluster utilization by automatically\nallocating cluster resources based on workload volume, with minimal impact to\nthe data processing latency of your pipelines. Enhanced Autoscaling is available\nfor `updates` clusters only. The legacy autoscaling feature is used for `maintenance`\nclusters.\n",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.PipelineClusterAutoscaleMode",
|
||||
"enum": [
|
||||
"ENHANCED",
|
||||
"LEGACY"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.PipelineClusterAutoscaleMode"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
|
@ -4155,7 +4248,12 @@
|
|||
]
|
||||
},
|
||||
"pipelines.PipelineClusterAutoscaleMode": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "Databricks Enhanced Autoscaling optimizes cluster utilization by automatically\nallocating cluster resources based on workload volume, with minimal impact to\nthe data processing latency of your pipelines. Enhanced Autoscaling is available\nfor `updates` clusters only. The legacy autoscaling feature is used for `maintenance`\nclusters.\n",
|
||||
"enum": [
|
||||
"ENHANCED",
|
||||
"LEGACY"
|
||||
]
|
||||
},
|
||||
"pipelines.PipelineDeployment": {
|
||||
"anyOf": [
|
||||
|
@ -4233,6 +4331,81 @@
|
|||
}
|
||||
]
|
||||
},
|
||||
"pipelines.ReportSpec": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"destination_catalog": {
|
||||
"description": "Required. Destination catalog to store table.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"destination_schema": {
|
||||
"description": "Required. Destination schema to store table.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"destination_table": {
|
||||
"description": "Required. Destination table name. The pipeline fails if a table with that name already exists.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"source_url": {
|
||||
"description": "Required. Report URL in the source system.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"table_configuration": {
|
||||
"description": "Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the IngestionPipelineDefinition object.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfig"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
|
||||
}
|
||||
]
|
||||
},
|
||||
"pipelines.RestartWindow": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"days_of_week": {
|
||||
"description": "Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour).\nIf not specified all days of the week will be used.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindowDaysOfWeek",
|
||||
"enum": [
|
||||
"MONDAY",
|
||||
"TUESDAY",
|
||||
"WEDNESDAY",
|
||||
"THURSDAY",
|
||||
"FRIDAY",
|
||||
"SATURDAY",
|
||||
"SUNDAY"
|
||||
]
|
||||
},
|
||||
"start_hour": {
|
||||
"description": "An integer between 0 and 23 denoting the start hour for the restart window in the 24-hour day.\nContinuous pipeline restart is triggered only within a five-hour window starting at this hour.",
|
||||
"$ref": "#/$defs/int"
|
||||
},
|
||||
"time_zone_id": {
|
||||
"description": "Time zone id of restart window. See https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html for details.\nIf not specified, UTC will be used.",
|
||||
"$ref": "#/$defs/string"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"required": [
|
||||
"start_hour"
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
|
||||
}
|
||||
]
|
||||
},
|
||||
"pipelines.RestartWindowDaysOfWeek": {
|
||||
"type": "string"
|
||||
},
|
||||
"pipelines.SchemaSpec": {
|
||||
"anyOf": [
|
||||
{
|
||||
|
@ -4281,7 +4454,7 @@
|
|||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"destination_table": {
|
||||
"description": "Optional. Destination table name. The pipeline fails If a table with that name already exists. If not set, the source table name is used.",
|
||||
"description": "Optional. Destination table name. The pipeline fails if a table with that name already exists. If not set, the source table name is used.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"source_catalog": {
|
||||
|
@ -4324,11 +4497,11 @@
|
|||
},
|
||||
"scd_type": {
|
||||
"description": "The SCD type to use to ingest the table.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfigScdType",
|
||||
"enum": [
|
||||
"SCD_TYPE_1",
|
||||
"SCD_TYPE_2"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfigScdType"
|
||||
},
|
||||
"sequence_by": {
|
||||
"description": "The column names specifying the logical order of events in the source data. Delta Live Tables uses this sequencing to handle change events that arrive out of order.",
|
||||
"$ref": "#/$defs/slice/string"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
|
@ -4340,7 +4513,12 @@
|
|||
]
|
||||
},
|
||||
"pipelines.TableSpecificConfigScdType": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "The SCD type to use to ingest the table.",
|
||||
"enum": [
|
||||
"SCD_TYPE_1",
|
||||
"SCD_TYPE_2"
|
||||
]
|
||||
},
|
||||
"serving.Ai21LabsConfig": {
|
||||
"anyOf": [
|
||||
|
@ -4429,11 +4607,7 @@
|
|||
"properties": {
|
||||
"behavior": {
|
||||
"description": "Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehaviorBehavior",
|
||||
"enum": [
|
||||
"NONE",
|
||||
"BLOCK"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehaviorBehavior"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
|
@ -4448,7 +4622,12 @@
|
|||
]
|
||||
},
|
||||
"serving.AiGatewayGuardrailPiiBehaviorBehavior": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned.",
|
||||
"enum": [
|
||||
"NONE",
|
||||
"BLOCK"
|
||||
]
|
||||
},
|
||||
"serving.AiGatewayGuardrails": {
|
||||
"anyOf": [
|
||||
|
@ -4513,18 +4692,11 @@
|
|||
},
|
||||
"key": {
|
||||
"description": "Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitKey",
|
||||
"enum": [
|
||||
"user",
|
||||
"endpoint"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitKey"
|
||||
},
|
||||
"renewal_period": {
|
||||
"description": "Renewal period field for a rate limit. Currently, only 'minute' is supported.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitRenewalPeriod",
|
||||
"enum": [
|
||||
"minute"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitRenewalPeriod"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
|
@ -4540,10 +4712,19 @@
|
|||
]
|
||||
},
|
||||
"serving.AiGatewayRateLimitKey": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.",
|
||||
"enum": [
|
||||
"user",
|
||||
"endpoint"
|
||||
]
|
||||
},
|
||||
"serving.AiGatewayRateLimitRenewalPeriod": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "Renewal period field for a rate limit. Currently, only 'minute' is supported.",
|
||||
"enum": [
|
||||
"minute"
|
||||
]
|
||||
},
|
||||
"serving.AiGatewayUsageTrackingConfig": {
|
||||
"anyOf": [
|
||||
|
@ -4590,13 +4771,7 @@
|
|||
},
|
||||
"bedrock_provider": {
|
||||
"description": "The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedrockProvider",
|
||||
"enum": [
|
||||
"anthropic",
|
||||
"cohere",
|
||||
"ai21labs",
|
||||
"amazon"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedrockProvider"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
|
@ -4612,7 +4787,14 @@
|
|||
]
|
||||
},
|
||||
"serving.AmazonBedrockConfigBedrockProvider": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.",
|
||||
"enum": [
|
||||
"anthropic",
|
||||
"cohere",
|
||||
"ai21labs",
|
||||
"amazon"
|
||||
]
|
||||
},
|
||||
"serving.AnthropicConfig": {
|
||||
"anyOf": [
|
||||
|
@ -4819,17 +5001,7 @@
|
|||
},
|
||||
"provider": {
|
||||
"description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.\",\n",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ExternalModelProvider",
|
||||
"enum": [
|
||||
"ai21labs",
|
||||
"anthropic",
|
||||
"amazon-bedrock",
|
||||
"cohere",
|
||||
"databricks-model-serving",
|
||||
"google-cloud-vertex-ai",
|
||||
"openai",
|
||||
"palm"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ExternalModelProvider"
|
||||
},
|
||||
"task": {
|
||||
"description": "The task type of the external model.",
|
||||
|
@ -4850,7 +5022,18 @@
|
|||
]
|
||||
},
|
||||
"serving.ExternalModelProvider": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.\",\n",
|
||||
"enum": [
|
||||
"ai21labs",
|
||||
"anthropic",
|
||||
"amazon-bedrock",
|
||||
"cohere",
|
||||
"databricks-model-serving",
|
||||
"google-cloud-vertex-ai",
|
||||
"openai",
|
||||
"palm"
|
||||
]
|
||||
},
|
||||
"serving.GoogleCloudVertexAiConfig": {
|
||||
"anyOf": [
|
||||
|
@ -4956,18 +5139,11 @@
|
|||
},
|
||||
"key": {
|
||||
"description": "Key field for a serving endpoint rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitKey",
|
||||
"enum": [
|
||||
"user",
|
||||
"endpoint"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitKey"
|
||||
},
|
||||
"renewal_period": {
|
||||
"description": "Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitRenewalPeriod",
|
||||
"enum": [
|
||||
"minute"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitRenewalPeriod"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
|
@ -4983,10 +5159,19 @@
|
|||
]
|
||||
},
|
||||
"serving.RateLimitKey": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "Key field for a serving endpoint rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.",
|
||||
"enum": [
|
||||
"user",
|
||||
"endpoint"
|
||||
]
|
||||
},
|
||||
"serving.RateLimitRenewalPeriod": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported.",
|
||||
"enum": [
|
||||
"minute"
|
||||
]
|
||||
},
|
||||
"serving.Route": {
|
||||
"anyOf": [
|
||||
|
@ -5111,23 +5296,11 @@
|
|||
},
|
||||
"workload_size": {
|
||||
"description": "The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.\n",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadSize",
|
||||
"enum": [
|
||||
"Small",
|
||||
"Medium",
|
||||
"Large"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadSize"
|
||||
},
|
||||
"workload_type": {
|
||||
"description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.\nSee the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).\n",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadType",
|
||||
"enum": [
|
||||
"CPU",
|
||||
"GPU_SMALL",
|
||||
"GPU_MEDIUM",
|
||||
"GPU_LARGE",
|
||||
"MULTIGPU_MEDIUM"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadType"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
|
@ -5144,10 +5317,24 @@
|
|||
]
|
||||
},
|
||||
"serving.ServedModelInputWorkloadSize": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.\n",
|
||||
"enum": [
|
||||
"Small",
|
||||
"Medium",
|
||||
"Large"
|
||||
]
|
||||
},
|
||||
"serving.ServedModelInputWorkloadType": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.\nSee the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).\n",
|
||||
"enum": [
|
||||
"CPU",
|
||||
"GPU_SMALL",
|
||||
"GPU_MEDIUM",
|
||||
"GPU_LARGE",
|
||||
"MULTIGPU_MEDIUM"
|
||||
]
|
||||
},
|
||||
"serving.TrafficConfig": {
|
||||
"anyOf": [
|
||||
|
@ -5246,6 +5433,20 @@
|
|||
}
|
||||
]
|
||||
},
|
||||
"resources.Dashboard": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Dashboard"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
|
||||
}
|
||||
]
|
||||
},
|
||||
"resources.Job": {
|
||||
"anyOf": [
|
||||
{
|
||||
|
|
|
@ -191,6 +191,8 @@ func newList() *cobra.Command {
|
|||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `An opaque page token which was the next_page_token in the response of the previous request to list the secrets for this service principal.`)
|
||||
|
||||
cmd.Use = "list SERVICE_PRINCIPAL_ID"
|
||||
cmd.Short = `List service principal secrets.`
|
||||
cmd.Long = `List service principal secrets.
|
||||
|
|
|
@ -81,6 +81,7 @@ func newCreate() *cobra.Command {
|
|||
cmd.Flags().StringVar(&createReq.DeploymentName, "deployment-name", createReq.DeploymentName, `The deployment name defines part of the subdomain for the workspace.`)
|
||||
// TODO: complex arg: gcp_managed_network_config
|
||||
// TODO: complex arg: gke_config
|
||||
cmd.Flags().BoolVar(&createReq.IsNoPublicIpEnabled, "is-no-public-ip-enabled", createReq.IsNoPublicIpEnabled, `Whether no public IP is enabled for the workspace.`)
|
||||
cmd.Flags().StringVar(&createReq.Location, "location", createReq.Location, `The Google Cloud region of the workspace data plane in your Google account.`)
|
||||
cmd.Flags().StringVar(&createReq.ManagedServicesCustomerManagedKeyId, "managed-services-customer-managed-key-id", createReq.ManagedServicesCustomerManagedKeyId, `The ID of the workspace's managed services encryption key configuration object.`)
|
||||
cmd.Flags().StringVar(&createReq.NetworkId, "network-id", createReq.NetworkId, ``)
|
||||
|
@ -420,6 +421,7 @@ func newUpdate() *cobra.Command {
|
|||
cmd.Flags().StringVar(&updateReq.ManagedServicesCustomerManagedKeyId, "managed-services-customer-managed-key-id", updateReq.ManagedServicesCustomerManagedKeyId, `The ID of the workspace's managed services encryption key configuration object.`)
|
||||
cmd.Flags().StringVar(&updateReq.NetworkConnectivityConfigId, "network-connectivity-config-id", updateReq.NetworkConnectivityConfigId, ``)
|
||||
cmd.Flags().StringVar(&updateReq.NetworkId, "network-id", updateReq.NetworkId, `The ID of the workspace's network configuration object.`)
|
||||
cmd.Flags().StringVar(&updateReq.PrivateAccessSettingsId, "private-access-settings-id", updateReq.PrivateAccessSettingsId, `The ID of the workspace's private access settings configuration object.`)
|
||||
cmd.Flags().StringVar(&updateReq.StorageConfigurationId, "storage-configuration-id", updateReq.StorageConfigurationId, `The ID of the workspace's storage configuration object.`)
|
||||
cmd.Flags().StringVar(&updateReq.StorageCustomerManagedKeyId, "storage-customer-managed-key-id", updateReq.StorageCustomerManagedKeyId, `The ID of the key configuration object for workspace storage.`)
|
||||
|
||||
|
|
|
@ -27,5 +27,6 @@ func New() *cobra.Command {
|
|||
cmd.AddCommand(newGenerateCommand())
|
||||
cmd.AddCommand(newDebugCommand())
|
||||
cmd.AddCommand(deployment.NewDeploymentCommand())
|
||||
cmd.AddCommand(newOpenCommand())
|
||||
return cmd
|
||||
}
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"os"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config/mutator"
|
||||
"github.com/databricks/cli/bundle/phases"
|
||||
"github.com/databricks/cli/cmd/bundle/utils"
|
||||
"github.com/databricks/cli/cmd/root"
|
||||
|
@ -62,7 +63,12 @@ func newDestroyCommand() *cobra.Command {
|
|||
|
||||
diags = bundle.Apply(ctx, b, bundle.Seq(
|
||||
phases.Initialize(),
|
||||
phases.Build(),
|
||||
// We need to resolve artifact variables (as we do in the build phase)
// because some of the to-be-destroyed resources might use them.
// Not resolving them might lead to a Terraform "Reference to undeclared resource" error.
|
||||
mutator.ResolveVariableReferences(
|
||||
"artifacts",
|
||||
),
|
||||
phases.Destroy(),
|
||||
))
|
||||
if err := diags.Error(); err != nil {
|
||||
|
|
|
@ -16,6 +16,7 @@ func newGenerateCommand() *cobra.Command {
|
|||
|
||||
cmd.AddCommand(generate.NewGenerateJobCommand())
|
||||
cmd.AddCommand(generate.NewGeneratePipelineCommand())
|
||||
cmd.AddCommand(generate.NewGenerateDashboardCommand())
|
||||
cmd.PersistentFlags().StringVar(&key, "key", "", `resource key to use for the generated configuration`)
|
||||
return cmd
|
||||
}
|
||||
|
|
|
@ -0,0 +1,467 @@
|
|||
package generate
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config/generate"
|
||||
"github.com/databricks/cli/bundle/deploy/terraform"
|
||||
"github.com/databricks/cli/bundle/phases"
|
||||
"github.com/databricks/cli/bundle/render"
|
||||
"github.com/databricks/cli/bundle/resources"
|
||||
"github.com/databricks/cli/cmd/root"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/cli/libs/dyn"
|
||||
"github.com/databricks/cli/libs/dyn/yamlsaver"
|
||||
"github.com/databricks/cli/libs/textutil"
|
||||
"github.com/databricks/databricks-sdk-go"
|
||||
"github.com/databricks/databricks-sdk-go/apierr"
|
||||
"github.com/databricks/databricks-sdk-go/service/dashboards"
|
||||
"github.com/databricks/databricks-sdk-go/service/workspace"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/exp/maps"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
type dashboard struct {
|
||||
// Lookup flags for one-time generate.
|
||||
existingPath string
|
||||
existingID string
|
||||
|
||||
// Lookup flag for existing bundle resource.
|
||||
resource string
|
||||
|
||||
// Where to write the configuration and dashboard representation.
|
||||
resourceDir string
|
||||
dashboardDir string
|
||||
|
||||
// Force overwrite of existing files.
|
||||
force bool
|
||||
|
||||
// Watch for changes to the dashboard.
|
||||
watch bool
|
||||
|
||||
// Relative path from the resource directory to the dashboard directory.
|
||||
relativeDashboardDir string
|
||||
}
|
||||
|
||||
func (d *dashboard) resolveID(ctx context.Context, b *bundle.Bundle) (string, diag.Diagnostics) {
|
||||
switch {
|
||||
case d.existingPath != "":
|
||||
return d.resolveFromPath(ctx, b)
|
||||
case d.existingID != "":
|
||||
return d.resolveFromID(ctx, b)
|
||||
}
|
||||
|
||||
return "", diag.Errorf("expected one of --dashboard-path, --dashboard-id")
|
||||
}
|
||||
|
||||
func (d *dashboard) resolveFromPath(ctx context.Context, b *bundle.Bundle) (string, diag.Diagnostics) {
|
||||
w := b.WorkspaceClient()
|
||||
obj, err := w.Workspace.GetStatusByPath(ctx, d.existingPath)
|
||||
if err != nil {
|
||||
if apierr.IsMissing(err) {
|
||||
return "", diag.Errorf("dashboard %q not found", path.Base(d.existingPath))
|
||||
}
|
||||
|
||||
// Emit a more descriptive error message for legacy dashboards.
|
||||
if errors.Is(err, apierr.ErrBadRequest) && strings.HasPrefix(err.Error(), "dbsqlDashboard ") {
|
||||
return "", diag.Diagnostics{
|
||||
{
|
||||
Severity: diag.Error,
|
||||
Summary: fmt.Sprintf("dashboard %q is a legacy dashboard", path.Base(d.existingPath)),
|
||||
Detail: "" +
|
||||
"Databricks Asset Bundles work exclusively with AI/BI dashboards.\n" +
|
||||
"\n" +
|
||||
"Instructions on how to convert a legacy dashboard to an AI/BI dashboard\n" +
|
||||
"can be found at: https://docs.databricks.com/en/dashboards/clone-legacy-to-aibi.html.",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return "", diag.FromErr(err)
|
||||
}
|
||||
|
||||
if obj.ObjectType != workspace.ObjectTypeDashboard {
|
||||
found := strings.ToLower(obj.ObjectType.String())
|
||||
return "", diag.Diagnostics{
|
||||
{
|
||||
Severity: diag.Error,
|
||||
Summary: fmt.Sprintf("expected a dashboard, found a %s", found),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if obj.ResourceId == "" {
|
||||
return "", diag.Diagnostics{
|
||||
{
|
||||
Severity: diag.Error,
|
||||
Summary: "expected a non-empty dashboard resource ID",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return obj.ResourceId, nil
|
||||
}
|
||||
|
||||
func (d *dashboard) resolveFromID(ctx context.Context, b *bundle.Bundle) (string, diag.Diagnostics) {
|
||||
w := b.WorkspaceClient()
|
||||
obj, err := w.Lakeview.GetByDashboardId(ctx, d.existingID)
|
||||
if err != nil {
|
||||
if apierr.IsMissing(err) {
|
||||
return "", diag.Errorf("dashboard with ID %s not found", d.existingID)
|
||||
}
|
||||
return "", diag.FromErr(err)
|
||||
}
|
||||
|
||||
return obj.DashboardId, nil
|
||||
}
|
||||
|
||||
func remarshalJSON(data []byte) ([]byte, error) {
|
||||
var tmp any
|
||||
var err error
|
||||
err = json.Unmarshal(data, &tmp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Remarshal the data to ensure its formatting is stable.
|
||||
// The result will have alphabetically sorted keys and be indented.
|
||||
// HTML escaping is disabled to retain characters such as &, <, and >.
|
||||
var buf bytes.Buffer
|
||||
enc := json.NewEncoder(&buf)
|
||||
enc.SetIndent("", " ")
|
||||
enc.SetEscapeHTML(false)
|
||||
err = enc.Encode(tmp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
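An illustrative example (editor's sketch, not part of this diff) of the stable output remarshalJSON produces with the encoder settings above:

out, err := remarshalJSON([]byte(`{"b":1,"a":"<x> & <y>"}`))
if err == nil {
	// Keys come back alphabetically sorted ("a" before "b"), the document is
	// indented, and the <, >, and & characters are kept verbatim instead of
	// being HTML-escaped.
	fmt.Print(string(out))
}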
func (d *dashboard) saveSerializedDashboard(_ context.Context, b *bundle.Bundle, dashboard *dashboards.Dashboard, filename string) error {
|
||||
// Unmarshal and remarshal the serialized dashboard to ensure it is formatted correctly.
|
||||
// The result will have alphabetically sorted keys and be indented.
|
||||
data, err := remarshalJSON([]byte(dashboard.SerializedDashboard))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Make sure the output directory exists.
|
||||
if err := os.MkdirAll(filepath.Dir(filename), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Clean the filename to ensure it is a valid path (and can be used on this OS).
|
||||
filename = filepath.Clean(filename)
|
||||
|
||||
// Attempt to make the path relative to the bundle root.
|
||||
rel, err := filepath.Rel(b.BundleRootPath, filename)
|
||||
if err != nil {
|
||||
rel = filename
|
||||
}
|
||||
|
||||
// Verify that the file does not already exist.
|
||||
info, err := os.Stat(filename)
|
||||
if err == nil {
|
||||
if info.IsDir() {
|
||||
return fmt.Errorf("%s is a directory", rel)
|
||||
}
|
||||
if !d.force {
|
||||
return fmt.Errorf("%s already exists. Use --force to overwrite", rel)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("Writing dashboard to %q\n", rel)
|
||||
return os.WriteFile(filename, data, 0644)
|
||||
}
|
||||
|
||||
func (d *dashboard) saveConfiguration(ctx context.Context, b *bundle.Bundle, dashboard *dashboards.Dashboard, key string) error {
|
||||
// Save serialized dashboard definition to the dashboard directory.
|
||||
dashboardBasename := fmt.Sprintf("%s.lvdash.json", key)
|
||||
dashboardPath := filepath.Join(d.dashboardDir, dashboardBasename)
|
||||
err := d.saveSerializedDashboard(ctx, b, dashboard, dashboardPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Synthesize resource configuration.
|
||||
v, err := generate.ConvertDashboardToValue(dashboard, path.Join(d.relativeDashboardDir, dashboardBasename))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
result := map[string]dyn.Value{
|
||||
"resources": dyn.V(map[string]dyn.Value{
|
||||
"dashboards": dyn.V(map[string]dyn.Value{
|
||||
key: v,
|
||||
}),
|
||||
}),
|
||||
}
|
||||
|
||||
// Make sure the output directory exists.
|
||||
if err := os.MkdirAll(d.resourceDir, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Save the configuration to the resource directory.
|
||||
resourcePath := filepath.Join(d.resourceDir, fmt.Sprintf("%s.dashboard.yml", key))
|
||||
saver := yamlsaver.NewSaverWithStyle(map[string]yaml.Style{
|
||||
"display_name": yaml.DoubleQuotedStyle,
|
||||
})
|
||||
|
||||
// Attempt to make the path relative to the bundle root.
|
||||
rel, err := filepath.Rel(b.BundleRootPath, resourcePath)
|
||||
if err != nil {
|
||||
rel = resourcePath
|
||||
}
|
||||
|
||||
fmt.Printf("Writing configuration to %q\n", rel)
|
||||
err = saver.SaveAsYAML(result, resourcePath, d.force)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func waitForChanges(ctx context.Context, w *databricks.WorkspaceClient, dashboard *dashboards.Dashboard) diag.Diagnostics {
|
||||
// Compute [time.Time] for the most recent update.
|
||||
tref, err := time.Parse(time.RFC3339, dashboard.UpdateTime)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
for {
|
||||
obj, err := w.Workspace.GetStatusByPath(ctx, dashboard.Path)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
// Compute [time.Time] from timestamp in millis since epoch.
|
||||
tcur := time.Unix(0, obj.ModifiedAt*int64(time.Millisecond))
|
||||
if tcur.After(tref) {
|
||||
break
|
||||
}
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
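A brief aside (not part of this diff): obj.ModifiedAt is expressed in milliseconds since the Unix epoch, so the conversion above can also be written with the standard library helper:

tcur := time.UnixMilli(obj.ModifiedAt) // equivalent to time.Unix(0, obj.ModifiedAt*int64(time.Millisecond))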
func (d *dashboard) updateDashboardForResource(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
resource, ok := b.Config.Resources.Dashboards[d.resource]
|
||||
if !ok {
|
||||
return diag.Errorf("dashboard resource %q is not defined", d.resource)
|
||||
}
|
||||
|
||||
if resource.FilePath == "" {
|
||||
return diag.Errorf("dashboard resource %q has no file path defined", d.resource)
|
||||
}
|
||||
|
||||
// Resolve the dashboard ID from the resource.
|
||||
dashboardID := resource.ID
|
||||
|
||||
// Overwrite the dashboard at the path referenced from the resource.
|
||||
dashboardPath := resource.FilePath
|
||||
|
||||
w := b.WorkspaceClient()
|
||||
|
||||
// Start polling the underlying dashboard for changes.
|
||||
var etag string
|
||||
for {
|
||||
dashboard, err := w.Lakeview.GetByDashboardId(ctx, dashboardID)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
if etag != dashboard.Etag {
|
||||
err = d.saveSerializedDashboard(ctx, b, dashboard, dashboardPath)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Abort if we are not watching for changes.
|
||||
if !d.watch {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Update the etag for the next iteration.
|
||||
etag = dashboard.Etag
|
||||
|
||||
// Now poll the workspace API for changes.
// This is much more efficient than polling the dashboard API, because the
// latter returns the entire serialized dashboard, whereas we are only
// interested in the dashboard's last modified time here.
|
||||
waitForChanges(ctx, w, dashboard)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dashboard) generateForExisting(ctx context.Context, b *bundle.Bundle, dashboardID string) diag.Diagnostics {
|
||||
w := b.WorkspaceClient()
|
||||
dashboard, err := w.Lakeview.GetByDashboardId(ctx, dashboardID)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
key := textutil.NormalizeString(dashboard.DisplayName)
|
||||
err = d.saveConfiguration(ctx, b, dashboard, key)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dashboard) initialize(b *bundle.Bundle) diag.Diagnostics {
|
||||
// Make the paths absolute if they aren't already.
|
||||
if !filepath.IsAbs(d.resourceDir) {
|
||||
d.resourceDir = filepath.Join(b.BundleRootPath, d.resourceDir)
|
||||
}
|
||||
if !filepath.IsAbs(d.dashboardDir) {
|
||||
d.dashboardDir = filepath.Join(b.BundleRootPath, d.dashboardDir)
|
||||
}
|
||||
|
||||
// Make sure we know how the dashboard path is relative to the resource path.
|
||||
rel, err := filepath.Rel(d.resourceDir, d.dashboardDir)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
d.relativeDashboardDir = filepath.ToSlash(rel)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dashboard) runForResource(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
diags := bundle.Apply(ctx, b, bundle.Seq(
|
||||
phases.Initialize(),
|
||||
terraform.Interpolate(),
|
||||
terraform.Write(),
|
||||
terraform.StatePull(),
|
||||
terraform.Load(),
|
||||
))
|
||||
if diags.HasError() {
|
||||
return diags
|
||||
}
|
||||
|
||||
return d.updateDashboardForResource(ctx, b)
|
||||
}
|
||||
|
||||
func (d *dashboard) runForExisting(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
// Resolve the ID of the dashboard to generate configuration for.
|
||||
dashboardID, diags := d.resolveID(ctx, b)
|
||||
if diags.HasError() {
|
||||
return diags
|
||||
}
|
||||
|
||||
return d.generateForExisting(ctx, b, dashboardID)
|
||||
}
|
||||
|
||||
func (d *dashboard) RunE(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
b, diags := root.MustConfigureBundle(cmd)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
|
||||
diags = d.initialize(b)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
|
||||
if d.resource != "" {
|
||||
diags = d.runForResource(ctx, b)
|
||||
} else {
|
||||
diags = d.runForExisting(ctx, b)
|
||||
}
|
||||
|
||||
renderOpts := render.RenderOptions{RenderSummaryTable: false}
|
||||
err := render.RenderDiagnostics(cmd.OutOrStdout(), b, diags, renderOpts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to render output: %w", err)
|
||||
}
|
||||
|
||||
if diags.HasError() {
|
||||
return root.ErrAlreadyPrinted
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// filterDashboards returns a filter that only includes dashboards.
|
||||
func filterDashboards(ref resources.Reference) bool {
|
||||
return ref.Description.SingularName == "dashboard"
|
||||
}
|
||||
|
||||
// dashboardResourceCompletion executes to autocomplete the argument to the resource flag.
|
||||
func dashboardResourceCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
b, diags := root.MustConfigureBundle(cmd)
|
||||
if err := diags.Error(); err != nil {
|
||||
cobra.CompErrorln(err.Error())
|
||||
return nil, cobra.ShellCompDirectiveError
|
||||
}
|
||||
|
||||
if b == nil {
|
||||
return nil, cobra.ShellCompDirectiveNoFileComp
|
||||
}
|
||||
|
||||
return maps.Keys(resources.Completions(b, filterDashboards)), cobra.ShellCompDirectiveNoFileComp
|
||||
}
|
||||
|
||||
func NewGenerateDashboardCommand() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "dashboard",
|
||||
Short: "Generate configuration for a dashboard",
|
||||
}
|
||||
|
||||
d := &dashboard{}
|
||||
|
||||
// Lookup flags.
|
||||
cmd.Flags().StringVar(&d.existingPath, "existing-path", "", `workspace path of the dashboard to generate configuration for`)
|
||||
cmd.Flags().StringVar(&d.existingID, "existing-id", "", `ID of the dashboard to generate configuration for`)
|
||||
cmd.Flags().StringVar(&d.resource, "resource", "", `resource key of dashboard to watch for changes`)
|
||||
|
||||
// Alias lookup flags that include the resource type name.
|
||||
// Included for symmetry with the other generate commands, but we prefer the shorter flags.
|
||||
cmd.Flags().StringVar(&d.existingPath, "existing-dashboard-path", "", `workspace path of the dashboard to generate configuration for`)
|
||||
cmd.Flags().StringVar(&d.existingID, "existing-dashboard-id", "", `ID of the dashboard to generate configuration for`)
|
||||
cmd.Flags().MarkHidden("existing-dashboard-path")
|
||||
cmd.Flags().MarkHidden("existing-dashboard-id")
|
||||
|
||||
// Output flags.
|
||||
cmd.Flags().StringVarP(&d.resourceDir, "resource-dir", "d", "./resources", `directory to write the configuration to`)
|
||||
cmd.Flags().StringVarP(&d.dashboardDir, "dashboard-dir", "s", "./src", `directory to write the dashboard representation to`)
|
||||
cmd.Flags().BoolVarP(&d.force, "force", "f", false, `force overwrite existing files in the output directory`)
|
||||
|
||||
// Exactly one of the lookup flags must be provided.
|
||||
cmd.MarkFlagsOneRequired(
|
||||
"existing-path",
|
||||
"existing-id",
|
||||
"resource",
|
||||
)
|
||||
|
||||
// Watch flag. This is relevant only in combination with the resource flag.
|
||||
cmd.Flags().BoolVar(&d.watch, "watch", false, `watch for changes to the dashboard and update the configuration`)
|
||||
|
||||
// Make sure the watch flag is only used with the existing-resource flag.
|
||||
cmd.MarkFlagsMutuallyExclusive("watch", "existing-path")
|
||||
cmd.MarkFlagsMutuallyExclusive("watch", "existing-id")
|
||||
|
||||
// Completion for the resource flag.
|
||||
cmd.RegisterFlagCompletionFunc("resource", dashboardResourceCompletion)
|
||||
|
||||
cmd.RunE = d.RunE
|
||||
return cmd
|
||||
}
|
|
@ -0,0 +1,182 @@
package generate

import (
	"context"
	"os"
	"path/filepath"
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/databricks-sdk-go/apierr"
	"github.com/databricks/databricks-sdk-go/experimental/mocks"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
	"github.com/databricks/databricks-sdk-go/service/workspace"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

func TestDashboard_ErrorOnLegacyDashboard(t *testing.T) {
	// Response to a GetStatus request on a path pointing to a legacy dashboard.
	//
	// < HTTP/2.0 400 Bad Request
	// < {
	// <   "error_code": "BAD_REQUEST",
	// <   "message": "dbsqlDashboard is not user-facing."
	// < }

	d := dashboard{
		existingPath: "/path/to/legacy dashboard",
	}

	m := mocks.NewMockWorkspaceClient(t)
	w := m.GetMockWorkspaceAPI()
	w.On("GetStatusByPath", mock.Anything, "/path/to/legacy dashboard").Return(nil, &apierr.APIError{
		StatusCode: 400,
		ErrorCode:  "BAD_REQUEST",
		Message:    "dbsqlDashboard is not user-facing.",
	})

	ctx := context.Background()
	b := &bundle.Bundle{}
	b.SetWorkpaceClient(m.WorkspaceClient)

	_, diags := d.resolveID(ctx, b)
	require.Len(t, diags, 1)
	assert.Equal(t, diags[0].Summary, "dashboard \"legacy dashboard\" is a legacy dashboard")
}

func TestDashboard_ExistingID_Nominal(t *testing.T) {
	root := t.TempDir()
	b := &bundle.Bundle{
		BundleRootPath: root,
	}

	m := mocks.NewMockWorkspaceClient(t)
	b.SetWorkpaceClient(m.WorkspaceClient)

	dashboardsAPI := m.GetMockLakeviewAPI()
	dashboardsAPI.EXPECT().GetByDashboardId(mock.Anything, "f00dcafe").Return(&dashboards.Dashboard{
		DashboardId:         "f00dcafe",
		DisplayName:         "This is a test dashboard",
		SerializedDashboard: `{"pages":[{"displayName":"New Page","layout":[],"name":"12345678"}]}`,
		WarehouseId:         "w4r3h0us3",
	}, nil)

	ctx := bundle.Context(context.Background(), b)
	cmd := NewGenerateDashboardCommand()
	cmd.SetContext(ctx)
	cmd.Flag("existing-id").Value.Set("f00dcafe")

	err := cmd.RunE(cmd, []string{})
	require.NoError(t, err)

	// Assert the contents of the generated configuration
	data, err := os.ReadFile(filepath.Join(root, "resources", "this_is_a_test_dashboard.dashboard.yml"))
	require.NoError(t, err)
	assert.Equal(t, `resources:
  dashboards:
    this_is_a_test_dashboard:
      display_name: "This is a test dashboard"
      warehouse_id: w4r3h0us3
      file_path: ../src/this_is_a_test_dashboard.lvdash.json
`, string(data))

	data, err = os.ReadFile(filepath.Join(root, "src", "this_is_a_test_dashboard.lvdash.json"))
	require.NoError(t, err)
	assert.JSONEq(t, `{"pages":[{"displayName":"New Page","layout":[],"name":"12345678"}]}`, string(data))
}

func TestDashboard_ExistingID_NotFound(t *testing.T) {
	root := t.TempDir()
	b := &bundle.Bundle{
		BundleRootPath: root,
	}

	m := mocks.NewMockWorkspaceClient(t)
	b.SetWorkpaceClient(m.WorkspaceClient)

	dashboardsAPI := m.GetMockLakeviewAPI()
	dashboardsAPI.EXPECT().GetByDashboardId(mock.Anything, "f00dcafe").Return(nil, &apierr.APIError{
		StatusCode: 404,
	})

	ctx := bundle.Context(context.Background(), b)
	cmd := NewGenerateDashboardCommand()
	cmd.SetContext(ctx)
	cmd.Flag("existing-id").Value.Set("f00dcafe")

	err := cmd.RunE(cmd, []string{})
	require.Error(t, err)
}

func TestDashboard_ExistingPath_Nominal(t *testing.T) {
	root := t.TempDir()
	b := &bundle.Bundle{
		BundleRootPath: root,
	}

	m := mocks.NewMockWorkspaceClient(t)
	b.SetWorkpaceClient(m.WorkspaceClient)

	workspaceAPI := m.GetMockWorkspaceAPI()
	workspaceAPI.EXPECT().GetStatusByPath(mock.Anything, "/path/to/dashboard").Return(&workspace.ObjectInfo{
		ObjectType: workspace.ObjectTypeDashboard,
		ResourceId: "f00dcafe",
	}, nil)

	dashboardsAPI := m.GetMockLakeviewAPI()
	dashboardsAPI.EXPECT().GetByDashboardId(mock.Anything, "f00dcafe").Return(&dashboards.Dashboard{
		DashboardId:         "f00dcafe",
		DisplayName:         "This is a test dashboard",
		SerializedDashboard: `{"pages":[{"displayName":"New Page","layout":[],"name":"12345678"}]}`,
		WarehouseId:         "w4r3h0us3",
	}, nil)

	ctx := bundle.Context(context.Background(), b)
	cmd := NewGenerateDashboardCommand()
	cmd.SetContext(ctx)
	cmd.Flag("existing-path").Value.Set("/path/to/dashboard")

	err := cmd.RunE(cmd, []string{})
	require.NoError(t, err)

	// Assert the contents of the generated configuration
	data, err := os.ReadFile(filepath.Join(root, "resources", "this_is_a_test_dashboard.dashboard.yml"))
	require.NoError(t, err)
	assert.Equal(t, `resources:
  dashboards:
    this_is_a_test_dashboard:
      display_name: "This is a test dashboard"
      warehouse_id: w4r3h0us3
      file_path: ../src/this_is_a_test_dashboard.lvdash.json
`, string(data))

	data, err = os.ReadFile(filepath.Join(root, "src", "this_is_a_test_dashboard.lvdash.json"))
	require.NoError(t, err)
	assert.JSONEq(t, `{"pages":[{"displayName":"New Page","layout":[],"name":"12345678"}]}`, string(data))
}

func TestDashboard_ExistingPath_NotFound(t *testing.T) {
	root := t.TempDir()
	b := &bundle.Bundle{
		BundleRootPath: root,
	}

	m := mocks.NewMockWorkspaceClient(t)
	b.SetWorkpaceClient(m.WorkspaceClient)

	workspaceAPI := m.GetMockWorkspaceAPI()
	workspaceAPI.EXPECT().GetStatusByPath(mock.Anything, "/path/to/dashboard").Return(nil, &apierr.APIError{
		StatusCode: 404,
	})

	ctx := bundle.Context(context.Background(), b)
	cmd := NewGenerateDashboardCommand()
	cmd.SetContext(ctx)
	cmd.Flag("existing-path").Value.Set("/path/to/dashboard")

	err := cmd.RunE(cmd, []string{})
	require.Error(t, err)
}
@ -0,0 +1,144 @@
package bundle

import (
	"context"
	"errors"
	"fmt"
	"os"
	"path/filepath"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config/mutator"
	"github.com/databricks/cli/bundle/deploy/terraform"
	"github.com/databricks/cli/bundle/phases"
	"github.com/databricks/cli/bundle/resources"
	"github.com/databricks/cli/cmd/bundle/utils"
	"github.com/databricks/cli/cmd/root"
	"github.com/databricks/cli/libs/cmdio"
	"github.com/spf13/cobra"
	"golang.org/x/exp/maps"

	"github.com/pkg/browser"
)

func promptOpenArgument(ctx context.Context, b *bundle.Bundle) (string, error) {
	// Compute map of "Human readable name of resource" -> "resource key".
	inv := make(map[string]string)
	for k, ref := range resources.Completions(b) {
		title := fmt.Sprintf("%s: %s", ref.Description.SingularTitle, ref.Resource.GetName())
		inv[title] = k
	}

	key, err := cmdio.Select(ctx, inv, "Resource to open")
	if err != nil {
		return "", err
	}

	return key, nil
}

func resolveOpenArgument(ctx context.Context, b *bundle.Bundle, args []string) (string, error) {
	// If no arguments are specified, prompt the user to select the resource to open.
	if len(args) == 0 && cmdio.IsPromptSupported(ctx) {
		return promptOpenArgument(ctx, b)
	}

	if len(args) < 1 {
		return "", fmt.Errorf("expected a KEY of the resource to open")
	}

	return args[0], nil
}

func newOpenCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "open",
		Short: "Open a resource in the browser",
		Args:  root.MaximumNArgs(1),
	}

	var forcePull bool
	cmd.Flags().BoolVar(&forcePull, "force-pull", false, "Skip local cache and load the state from the remote workspace")

	cmd.RunE = func(cmd *cobra.Command, args []string) error {
		ctx := cmd.Context()
		b, diags := utils.ConfigureBundleWithVariables(cmd)
		if err := diags.Error(); err != nil {
			return diags.Error()
		}

		diags = bundle.Apply(ctx, b, phases.Initialize())
		if err := diags.Error(); err != nil {
			return err
		}

		arg, err := resolveOpenArgument(ctx, b, args)
		if err != nil {
			return err
		}

		cacheDir, err := terraform.Dir(ctx, b)
		if err != nil {
			return err
		}
		_, stateFileErr := os.Stat(filepath.Join(cacheDir, terraform.TerraformStateFileName))
		_, configFileErr := os.Stat(filepath.Join(cacheDir, terraform.TerraformConfigFileName))
		noCache := errors.Is(stateFileErr, os.ErrNotExist) || errors.Is(configFileErr, os.ErrNotExist)

		if forcePull || noCache {
			diags = bundle.Apply(ctx, b, bundle.Seq(
				terraform.StatePull(),
				terraform.Interpolate(),
				terraform.Write(),
			))
			if err := diags.Error(); err != nil {
				return err
			}
		}

		diags = bundle.Apply(ctx, b, bundle.Seq(
			terraform.Load(),
			mutator.InitializeURLs(),
		))
		if err := diags.Error(); err != nil {
			return err
		}

		// Locate resource to open.
		ref, err := resources.Lookup(b, arg)
		if err != nil {
			return err
		}

		// Confirm that the resource has a URL.
		url := ref.Resource.GetURL()
		if url == "" {
			return fmt.Errorf("resource does not have a URL associated with it (has it been deployed?)")
		}

		return browser.OpenURL(url)
	}

	cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		b, diags := root.MustConfigureBundle(cmd)
		if err := diags.Error(); err != nil {
			cobra.CompErrorln(err.Error())
			return nil, cobra.ShellCompDirectiveError
		}

		// No completion in the context of a bundle.
		// Source and destination paths are taken from bundle configuration.
		if b == nil {
			return nil, cobra.ShellCompDirectiveNoFileComp
		}

		if len(args) == 0 {
			completions := resources.Completions(b)
			return maps.Keys(completions), cobra.ShellCompDirectiveNoFileComp
		} else {
			return nil, cobra.ShellCompDirectiveNoFileComp
		}
	}

	return cmd
}
@ -1,20 +1,75 @@
package bundle

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/deploy/terraform"
	"github.com/databricks/cli/bundle/phases"
	"github.com/databricks/cli/bundle/resources"
	"github.com/databricks/cli/bundle/run"
	"github.com/databricks/cli/bundle/run/output"
	"github.com/databricks/cli/cmd/bundle/utils"
	"github.com/databricks/cli/cmd/root"
	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/cli/libs/flags"
	"github.com/spf13/cobra"
	"golang.org/x/exp/maps"
)

func promptRunArgument(ctx context.Context, b *bundle.Bundle) (string, error) {
	// Compute map of "Human readable name of resource" -> "resource key".
	inv := make(map[string]string)
	for k, ref := range resources.Completions(b, run.IsRunnable) {
		title := fmt.Sprintf("%s: %s", ref.Description.SingularTitle, ref.Resource.GetName())
		inv[title] = k
	}

	key, err := cmdio.Select(ctx, inv, "Resource to run")
	if err != nil {
		return "", err
	}

	return key, nil
}

// resolveRunArgument resolves the resource key to run.
// It returns the remaining arguments to pass to the runner, if applicable.
func resolveRunArgument(ctx context.Context, b *bundle.Bundle, args []string) (string, []string, error) {
	// If no arguments are specified, prompt the user to select something to run.
	if len(args) == 0 && cmdio.IsPromptSupported(ctx) {
		key, err := promptRunArgument(ctx, b)
		if err != nil {
			return "", nil, err
		}
		return key, args, nil
	}

	if len(args) < 1 {
		return "", nil, fmt.Errorf("expected a KEY of the resource to run")
	}

	return args[0], args[1:], nil
}

func keyToRunner(b *bundle.Bundle, arg string) (run.Runner, error) {
	// Locate the resource to run.
	ref, err := resources.Lookup(b, arg, run.IsRunnable)
	if err != nil {
		return nil, err
	}

	// Convert the resource to a runnable resource.
	runner, err := run.ToRunner(b, ref)
	if err != nil {
		return nil, err
	}

	return runner, nil
}

func newRunCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use: "run [flags] KEY",
@ -60,22 +115,9 @@ task or a Python wheel task, the second example applies.
			return err
		}

		// If no arguments are specified, prompt the user to select something to run.
		if len(args) == 0 && cmdio.IsPromptSupported(ctx) {
			// Invert completions from KEY -> NAME, to NAME -> KEY.
			inv := make(map[string]string)
			for k, v := range run.ResourceCompletionMap(b) {
				inv[v] = k
			}
			id, err := cmdio.Select(ctx, inv, "Resource to run")
			if err != nil {
				return err
			}
			args = append(args, id)
		}

		if len(args) < 1 {
			return fmt.Errorf("expected a KEY of the resource to run")
		key, args, err := resolveRunArgument(ctx, b, args)
		if err != nil {
			return err
		}

		diags = bundle.Apply(ctx, b, bundle.Seq(
@ -88,31 +130,28 @@ task or a Python wheel task, the second example applies.
			return err
		}

		runner, err := run.Find(b, args[0])
		runner, err := keyToRunner(b, key)
		if err != nil {
			return err
		}

		// Parse additional positional arguments.
		err = runner.ParseArgs(args[1:], &runOptions)
		err = runner.ParseArgs(args, &runOptions)
		if err != nil {
			return err
		}

		runOptions.NoWait = noWait
		var output output.RunOutput
		if restart {
			s := cmdio.Spinner(ctx)
			s <- "Cancelling all runs"
			err := runner.Cancel(ctx)
			close(s)
			if err != nil {
				return err
			}
			output, err = runner.Restart(ctx, &runOptions)
		} else {
			output, err = runner.Run(ctx, &runOptions)
		}
		output, err := runner.Run(ctx, &runOptions)
		if err != nil {
			return err
		}

		if output != nil {
			switch root.OutputType(cmd) {
			case flags.OutputText:
@ -148,10 +187,11 @@ task or a Python wheel task, the second example applies.
		}

		if len(args) == 0 {
			return run.ResourceCompletions(b), cobra.ShellCompDirectiveNoFileComp
			completions := resources.Completions(b, run.IsRunnable)
			return maps.Keys(completions), cobra.ShellCompDirectiveNoFileComp
		} else {
			// If we know the resource to run, we can complete additional positional arguments.
			runner, err := run.Find(b, args[0])
			runner, err := keyToRunner(b, args[0])
			if err != nil {
				return nil, cobra.ShellCompDirectiveError
			}
@ -1,7 +1,9 @@
package bundle

import (
	"context"
	"fmt"
	"io"
	"time"

	"github.com/databricks/cli/bundle"
@ -9,6 +11,7 @@ import (
	"github.com/databricks/cli/bundle/phases"
	"github.com/databricks/cli/cmd/bundle/utils"
	"github.com/databricks/cli/cmd/root"
	"github.com/databricks/cli/libs/flags"
	"github.com/databricks/cli/libs/log"
	"github.com/databricks/cli/libs/sync"
	"github.com/spf13/cobra"
@ -18,6 +21,7 @@ type syncFlags struct {
	interval time.Duration
	full     bool
	watch    bool
	output   flags.Output
}

func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, b *bundle.Bundle) (*sync.SyncOptions, error) {
@ -26,6 +30,21 @@ func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, b *bundle.Bundle)
		return nil, fmt.Errorf("cannot get sync options: %w", err)
	}

	if f.output != "" {
		var outputFunc func(context.Context, <-chan sync.Event, io.Writer)
		switch f.output {
		case flags.OutputText:
			outputFunc = sync.TextOutput
		case flags.OutputJSON:
			outputFunc = sync.JsonOutput
		}
		if outputFunc != nil {
			opts.OutputHandler = func(ctx context.Context, c <-chan sync.Event) {
				outputFunc(ctx, c, cmd.OutOrStdout())
			}
		}
	}

	opts.Full = f.full
	opts.PollInterval = f.interval
	return opts, nil
@ -42,6 +61,7 @@ func newSyncCommand() *cobra.Command {
	cmd.Flags().DurationVar(&f.interval, "interval", 1*time.Second, "file system polling interval (for --watch)")
	cmd.Flags().BoolVar(&f.full, "full", false, "perform full synchronization (default is incremental)")
	cmd.Flags().BoolVar(&f.watch, "watch", false, "watch local file system for changes")
	cmd.Flags().Var(&f.output, "output", "type of the output format")

	cmd.RunE = func(cmd *cobra.Command, args []string) error {
		ctx := cmd.Context()
@ -65,6 +85,7 @@ func newSyncCommand() *cobra.Command {
		if err != nil {
			return err
		}
		defer s.Close()

		log.Infof(ctx, "Remote file sync location: %v", opts.RemotePath)
@ -7,6 +7,7 @@ import (
	"testing"

	"github.com/databricks/cli/cmd/root"
	"github.com/databricks/cli/libs/fakefs"
	"github.com/databricks/cli/libs/filer"
	"github.com/databricks/databricks-sdk-go/experimental/mocks"
	"github.com/spf13/cobra"
@ -84,7 +85,7 @@ func setupTest(t *testing.T) (*validArgs, *cobra.Command, *mocks.MockWorkspaceCl
	cmd, m := setupCommand(t)

	fakeFilerForPath := func(ctx context.Context, fullPath string) (filer.Filer, string, error) {
		fakeFiler := filer.NewFakeFiler(map[string]filer.FakeFileInfo{
		fakeFiler := filer.NewFakeFiler(map[string]fakefs.FileInfo{
			"dir":      {FakeName: "root", FakeDir: true},
			"dir/dirA": {FakeDir: true},
			"dir/dirB": {FakeDir: true},
@ -11,6 +11,7 @@ import (

	"github.com/databricks/cli/internal/build"
	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/cli/libs/dbr"
	"github.com/databricks/cli/libs/log"
	"github.com/spf13/cobra"
)
@ -73,8 +74,12 @@ func New(ctx context.Context) *cobra.Command {
		// get the context back
		ctx = cmd.Context()

		// Detect if the CLI is running on DBR and store this on the context.
		ctx = dbr.DetectRuntime(ctx)

		// Configure our user agent with the command that's about to be executed.
		ctx = withCommandInUserAgent(ctx, cmd)
		ctx = withCommandExecIdInUserAgent(ctx)
		ctx = withUpstreamInUserAgent(ctx)
		cmd.SetContext(ctx)
		return nil
@ -0,0 +1,14 @@
package root

import (
	"context"

	"github.com/databricks/databricks-sdk-go/useragent"
	"github.com/google/uuid"
)

func withCommandExecIdInUserAgent(ctx context.Context) context.Context {
	// A UUID that will allow us to correlate multiple API requests made by
	// the same CLI invocation.
	return useragent.InContext(ctx, "cmd-exec-id", uuid.New().String())
}
@ -0,0 +1,26 @@
package root

import (
	"context"
	"regexp"
	"testing"

	"github.com/databricks/databricks-sdk-go/useragent"
	"github.com/google/uuid"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestWithCommandExecIdInUserAgent(t *testing.T) {
	ctx := withCommandExecIdInUserAgent(context.Background())

	// Check that the command exec ID is in the user agent string.
	ua := useragent.FromContext(ctx)
	re := regexp.MustCompile(`cmd-exec-id/([a-f0-9-]+)`)
	matches := re.FindAllStringSubmatch(ua, -1)

	// Assert that we have exactly one match and that it's a valid UUID.
	require.Len(t, matches, 1)
	_, err := uuid.Parse(matches[0][1])
	assert.NoError(t, err)
}