Merge remote-tracking branch 'origin' into cmd-uuid

Shreyas Goenka committed 2024-10-30 15:36:24 +01:00 (commit e766bfe888)
332 changed files with 12458 additions and 1893 deletions


@@ -11,6 +11,7 @@
  "toolchain": {
    "required": ["go"],
    "post_generate": [
      "go test -timeout 240s -run TestConsistentDatabricksSdkVersion github.com/databricks/cli/internal/build",
      "go run ./bundle/internal/schema/*.go ./bundle/schema/jsonschema.json",
      "echo 'bundle/internal/tf/schema/\\*.go linguist-generated=true' >> ./.gitattributes",
      "echo 'go.sum linguist-generated=true' >> ./.gitattributes",


@@ -1 +1 @@
-6f6b1371e640f2dfeba72d365ac566368656f6b6
+cf9c61453990df0f9453670f2fe68e1b128647a2


@@ -5,6 +5,7 @@ package {{(.TrimPrefix "account").SnakeName}}
import (
	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/cli/libs/flags"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/cmd/root"
	"github.com/databricks/databricks-sdk-go/service/{{.Package.Name}}"
	"github.com/spf13/cobra"
@@ -231,10 +232,16 @@ func new{{.PascalName}}() *cobra.Command {
 	{{- if .Request }}
 	{{ if .CanUseJson }}
 	if cmd.Flags().Changed("json") {
-		err = {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req)
+		diags := {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req)
+		if diags.HasError() {
+			return diags.Error()
+		}
+		if len(diags) > 0 {
+			err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
 		if err != nil {
 			return err
 		}
+		}
 	}{{end}}{{ if .MustUseJson }}else {
 		return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
 	}{{- end}}
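For readers unfamiliar with the codegen templates, here is roughly what the block above expands to for a hypothetical `create` command (the `createJson`/`createReq` identifiers are illustrative, not taken from the generated CLI):

```go
if cmd.Flags().Changed("json") {
	// Unmarshal now returns structured diagnostics instead of a plain error.
	diags := createJson.Unmarshal(&createReq)
	if diags.HasError() {
		return diags.Error()
	}
	// Non-fatal diagnostics (e.g. warnings) are rendered to stderr rather
	// than aborting the command.
	if len(diags) > 0 {
		err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
		if err != nil {
			return err
		}
	}
}
```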

.gitattributes

@@ -54,6 +54,7 @@ cmd/workspace/dashboards/dashboards.go linguist-generated=true
cmd/workspace/data-sources/data-sources.go linguist-generated=true
cmd/workspace/default-namespace/default-namespace.go linguist-generated=true
cmd/workspace/disable-legacy-access/disable-legacy-access.go linguist-generated=true
cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs.go linguist-generated=true
cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go linguist-generated=true
cmd/workspace/experiments/experiments.go linguist-generated=true
cmd/workspace/external-locations/external-locations.go linguist-generated=true

.github/workflows/external-message.yml (new file)

@@ -0,0 +1,114 @@
name: PR Comment

# WARNING:
# THIS WORKFLOW ALWAYS RUNS FOR EXTERNAL CONTRIBUTORS WITHOUT ANY APPROVAL.
# THIS WORKFLOW RUNS FROM MAIN BRANCH, NOT FROM THE PR BRANCH.
# DO NOT PULL THE PR OR EXECUTE ANY CODE FROM THE PR.

on:
  pull_request_target:
    types: [opened, reopened, synchronize]
    branches:
      - main

jobs:
  comment-on-pr:
    runs-on: ubuntu-latest
    permissions:
      pull-requests: write

    steps:
      # NOTE: The following checks may not be accurate depending on Org or Repo settings.
      - name: Check user and potential secret access
        id: check-secrets-access
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          USER_LOGIN="${{ github.event.pull_request.user.login }}"
          REPO_OWNER="${{ github.repository_owner }}"
          REPO_NAME="${{ github.event.repository.name }}"

          echo "Pull request opened by: $USER_LOGIN"

          # Check if PR is from a fork
          IS_FORK=$([[ "${{ github.event.pull_request.head.repo.full_name }}" != "${{ github.repository }}" ]] && echo "true" || echo "false")

          HAS_ACCESS="false"

          # Check user's permission level on the repository
          USER_PERMISSION=$(gh api repos/$REPO_OWNER/$REPO_NAME/collaborators/$USER_LOGIN/permission --jq '.permission')

          if [[ "$USER_PERMISSION" == "admin" || "$USER_PERMISSION" == "write" ]]; then
            HAS_ACCESS="true"
          elif [[ "$USER_PERMISSION" == "read" ]]; then
            # For read access, we need to check if the user has been explicitly granted secret access.
            # This information is not directly available via API, so we'll make an assumption
            # that read access does not imply secret access.
            HAS_ACCESS="false"
          fi

          # Check if repo owner is an organization
          IS_ORG=$(gh api users/$REPO_OWNER --jq '.type == "Organization"')

          if [[ "$IS_ORG" == "true" && "$HAS_ACCESS" == "false" ]]; then
            # Check if user is a member of any team with write or admin access to the repo
            TEAMS_WITH_ACCESS=$(gh api repos/$REPO_OWNER/$REPO_NAME/teams --jq '.[] | select(.permission == "push" or .permission == "admin") | .slug')
            for team in $TEAMS_WITH_ACCESS; do
              IS_TEAM_MEMBER=$(gh api orgs/$REPO_OWNER/teams/$team/memberships/$USER_LOGIN --silent && echo "true" || echo "false")
              if [[ "$IS_TEAM_MEMBER" == "true" ]]; then
                HAS_ACCESS="true"
                break
              fi
            done
          fi

          # If it's a fork, set HAS_ACCESS to false regardless of other checks
          if [[ "$IS_FORK" == "true" ]]; then
            HAS_ACCESS="false"
          fi

          echo "has_secrets_access=$HAS_ACCESS" >> $GITHUB_OUTPUT
          if [[ "$HAS_ACCESS" == "true" ]]; then
            echo "User $USER_LOGIN likely has access to secrets"
          else
            echo "User $USER_LOGIN likely does not have access to secrets"
          fi

      - uses: actions/checkout@v4

      - name: Delete old comments
        if: steps.check-secrets-access.outputs.has_secrets_access != 'true'
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          # Delete previous comment if it exists
          previous_comment_ids=$(gh api "repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/comments" \
            --jq '.[] | select(.body | startswith("<!-- INTEGRATION_TESTS -->")) | .id')
          echo "Previous comment IDs: $previous_comment_ids"
          # Iterate over each comment ID and delete the comment
          if [ ! -z "$previous_comment_ids" ]; then
            echo "$previous_comment_ids" | while read -r comment_id; do
              echo "Deleting comment with ID: $comment_id"
              gh api "repos/${{ github.repository }}/issues/comments/$comment_id" -X DELETE
            done
          fi

      - name: Comment on PR
        if: steps.check-secrets-access.outputs.has_secrets_access != 'true'
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          COMMIT_SHA: ${{ github.event.pull_request.head.sha }}
        run: |
          gh pr comment ${{ github.event.pull_request.number }} --body \
          "<!-- INTEGRATION_TESTS -->
          Run integration tests manually:
          [go/deco-tests-run/cli](https://go/deco-tests-run/cli)

          Inputs:
          * PR number: ${{github.event.pull_request.number}}
          * Commit SHA: \`${{ env.COMMIT_SHA }}\`

          Checks will be approved automatically on success.
          "

.github/workflows/integration-tests.yml (new file)

@@ -0,0 +1,77 @@
name: integration

on:
  pull_request:
    types: [opened, synchronize]
  merge_group:

jobs:
  check-token:
    runs-on: ubuntu-latest
    outputs:
      has_token: ${{ steps.set-token-status.outputs.has_token }}
    steps:
      - name: Check if GITHUB_TOKEN is set
        id: set-token-status
        run: |
          if [ -z "${{ secrets.GITHUB_TOKEN }}" ]; then
            echo "GITHUB_TOKEN is empty. User has no access to tokens."
            echo "::set-output name=has_token::false"
          else
            echo "GITHUB_TOKEN is set. User has access to tokens."
            echo "::set-output name=has_token::true"
          fi

  trigger-tests:
    runs-on: ubuntu-latest
    needs: check-token
    if: github.event_name == 'pull_request' && needs.check-token.outputs.has_token == 'true'
    environment: "test-trigger-is"

    steps:
      - uses: actions/checkout@v4

      - name: Generate GitHub App Token
        id: generate-token
        uses: actions/create-github-app-token@v1
        with:
          app-id: ${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}
          private-key: ${{ secrets.DECO_WORKFLOW_TRIGGER_PRIVATE_KEY }}
          owner: ${{ secrets.ORG_NAME }}
          repositories: ${{secrets.REPO_NAME}}

      - name: Trigger Workflow in Another Repo
        env:
          GH_TOKEN: ${{ steps.generate-token.outputs.token }}
        run: |
          gh workflow run cli-isolated-pr.yml -R ${{ secrets.ORG_NAME }}/${{secrets.REPO_NAME}} \
            --ref main \
            -f pull_request_number=${{ github.event.pull_request.number }} \
            -f commit_sha=${{ github.event.pull_request.head.sha }}

  # Statuses and checks apply to specific commits (by hash).
  # Enforcement of required checks is done both at the PR level and the merge queue level.
  # In case of multiple commits in a single PR, the hash of the squashed commit
  # will not match the one for the latest (approved) commit in the PR.
  # We auto approve the check for the merge queue for two reasons:
  # * Queue times out due to duration of tests.
  # * Avoid running integration tests twice, since it was already run at the tip of the branch before squashing.
  auto-approve:
    if: github.event_name == 'merge_group'
    runs-on: ubuntu-latest
    steps:
      - name: Mark Check
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        shell: bash
        run: |
          gh api -X POST -H "Accept: application/vnd.github+json" \
            -H "X-GitHub-Api-Version: 2022-11-28" \
            /repos/${{ github.repository }}/statuses/${{ github.sha }} \
            -f 'state=success' \
            -f 'context=Integration Tests Check'


@@ -1,5 +1,71 @@
# Version changelog

## [Release] Release v0.231.0

CLI:
* Added JSON input validation for CLI commands ([#1771](https://github.com/databricks/cli/pull/1771)).
* Support Git worktrees for `sync` ([#1831](https://github.com/databricks/cli/pull/1831)).

Bundles:
* Add `bundle summary` to display URLs for deployed resources ([#1731](https://github.com/databricks/cli/pull/1731)).
* Added a warning when incorrect permissions are used for the `/Workspace/Shared` bundle root ([#1821](https://github.com/databricks/cli/pull/1821)).
* Show actionable errors for collaborative deployment scenarios ([#1386](https://github.com/databricks/cli/pull/1386)).
* Fix path to repository-wide exclude file ([#1837](https://github.com/databricks/cli/pull/1837)).
* Fixed typo in converting cluster permissions ([#1826](https://github.com/databricks/cli/pull/1826)).
* Ignore metastore permission error during template generation ([#1819](https://github.com/databricks/cli/pull/1819)).
* Handle normalization of `dyn.KindTime` into an any type ([#1836](https://github.com/databricks/cli/pull/1836)).
* Added support for pip options in environment dependencies ([#1842](https://github.com/databricks/cli/pull/1842)).
* Fix race condition when restarting continuous jobs ([#1849](https://github.com/databricks/cli/pull/1849)).
* Fix pipeline in default-python template not working for certain workspaces ([#1854](https://github.com/databricks/cli/pull/1854)).
* Add "output" flag to the bundle sync command ([#1853](https://github.com/databricks/cli/pull/1853)).

Internal:
* Move utility functions dealing with IAM to libs/iamutil ([#1820](https://github.com/databricks/cli/pull/1820)).
* Remove unused `IS_OWNER` constant ([#1823](https://github.com/databricks/cli/pull/1823)).
* Assert SDK version is consistent in the CLI generation process ([#1814](https://github.com/databricks/cli/pull/1814)).
* Fixed unmarshalling json input into `interface{}` type ([#1832](https://github.com/databricks/cli/pull/1832)).
* Fix `TestAccFsMkdirWhenFileExistsAtPath` in isolated Azure environments ([#1833](https://github.com/databricks/cli/pull/1833)).
* Add behavioral tests for examples from the YAML spec ([#1835](https://github.com/databricks/cli/pull/1835)).
* Remove Terraform conversion function that's no longer used ([#1840](https://github.com/databricks/cli/pull/1840)).
* Encode assumptions about the dashboards API in a test ([#1839](https://github.com/databricks/cli/pull/1839)).
* Add script to make testing of code on branches easier ([#1844](https://github.com/databricks/cli/pull/1844)).

API Changes:
* Added `databricks disable-legacy-dbfs` command group.

OpenAPI commit cf9c61453990df0f9453670f2fe68e1b128647a2 (2024-10-14)

Dependency updates:
* Upgrade TF provider to 1.54.0 ([#1852](https://github.com/databricks/cli/pull/1852)).
* Bump github.com/databricks/databricks-sdk-go from 0.48.0 to 0.49.0 ([#1843](https://github.com/databricks/cli/pull/1843)).

## [Release] Release v0.230.0

Notable changes for Databricks Asset Bundles:

Workspace paths are automatically prefixed with `/Workspace`. In addition, all usage of path strings such as `/Workspace/${workspace.root_path}/...` in bundle configuration is automatically replaced with `${workspace.root_path}/...` and generates a warning as part of bundle validate.

More details can be found here: https://docs.databricks.com/en/release-notes/dev-tools/bundles.html#workspace-paths
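As a rough illustration of the substitution described above, here is a simplified sketch in Go (not the CLI's actual implementation, which rewrites the configuration tree rather than raw strings):

```go
package main

import (
	"fmt"
	"strings"
)

// stripRedundantWorkspacePrefix sketches the rewrite: an explicit
// "/Workspace/${workspace.root_path}/..." prefix is redundant because the
// variable already resolves to a /Workspace path, so it is reduced to
// "${workspace.root_path}/...".
func stripRedundantWorkspacePrefix(p string) string {
	const redundant = "/Workspace/${workspace.root_path}"
	if strings.HasPrefix(p, redundant) {
		return strings.TrimPrefix(p, "/Workspace/")
	}
	return p
}

func main() {
	fmt.Println(stripRedundantWorkspacePrefix("/Workspace/${workspace.root_path}/files"))
	// Output: ${workspace.root_path}/files
}
```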
Bundles:
* Add an error if state files grow bigger than the export limit ([#1795](https://github.com/databricks/cli/pull/1795)).
* Always prepend bundle remote paths with /Workspace ([#1724](https://github.com/databricks/cli/pull/1724)).
* Add resource path field to bundle workspace configuration ([#1800](https://github.com/databricks/cli/pull/1800)).
* Add validation for files with a `.(resource-name).yml` extension ([#1780](https://github.com/databricks/cli/pull/1780)).

Internal:
* Remove deprecated or readonly fields from the bundle schema ([#1809](https://github.com/databricks/cli/pull/1809)).

API Changes:
* Changed `databricks git-credentials create`, `databricks git-credentials delete`, `databricks git-credentials get`, `databricks git-credentials list`, and `databricks git-credentials update` commands.
* Changed `databricks repos create`, `databricks repos delete`, `databricks repos get`, and `databricks repos update` commands.

OpenAPI commit 0c86ea6dbd9a730c24ff0d4e509603e476955ac5 (2024-10-02)

Dependency updates:
* Upgrade TF provider to 1.53.0 ([#1815](https://github.com/databricks/cli/pull/1815)).
* Bump golang.org/x/term from 0.24.0 to 0.25.0 ([#1811](https://github.com/databricks/cli/pull/1811)).
* Bump golang.org/x/text from 0.18.0 to 0.19.0 ([#1812](https://github.com/databricks/cli/pull/1812)).
* Bump github.com/databricks/databricks-sdk-go from 0.47.0 to 0.48.0 ([#1810](https://github.com/databricks/cli/pull/1810)).

## [Release] Release v0.229.0

Bundles:


@@ -35,3 +35,6 @@ docker run -e DATABRICKS_HOST=$YOUR_HOST_URL -e DATABRICKS_TOKEN=$YOUR_TOKEN ghc
This CLI follows the Databricks Unified Authentication principles.
You can find a detailed description at https://github.com/databricks/databricks-sdk-go#authentication.

## Privacy Notice

Databricks CLI use is subject to the [Databricks License](https://github.com/databricks/cli/blob/main/LICENSE) and [Databricks Privacy Notice](https://www.databricks.com/legal/privacynotice), including any Usage Data provisions.


@@ -0,0 +1,18 @@
package generate

import (
	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
)

func ConvertDashboardToValue(dashboard *dashboards.Dashboard, filePath string) (dyn.Value, error) {
	// The majority of fields of the dashboard struct are read-only.
	// We copy the relevant fields manually.
	dv := map[string]dyn.Value{
		"display_name": dyn.NewValue(dashboard.DisplayName, []dyn.Location{{Line: 1}}),
		"warehouse_id": dyn.NewValue(dashboard.WarehouseId, []dyn.Location{{Line: 2}}),
		"file_path":    dyn.NewValue(filePath, []dyn.Location{{Line: 3}}),
	}

	return dyn.V(dv), nil
}
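A minimal usage sketch for the converter above; the dashboard values and file path are made up for illustration:

```go
// generateDashboardConfig is a hypothetical caller: it builds a dyn.Value for
// bundle config generation from a dashboard fetched via the SDK.
func generateDashboardConfig() (dyn.Value, error) {
	d := &dashboards.Dashboard{
		DisplayName: "Sales report",
		WarehouseId: "abc123",
	}
	// The returned value holds the display_name, warehouse_id, and file_path keys.
	return ConvertDashboardToValue(d, "src/sales_report.lvdash.json")
}
```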


@@ -18,7 +18,7 @@ func TestEntryPointNoRootPath(t *testing.T) {
 func TestEntryPoint(t *testing.T) {
 	b := &bundle.Bundle{
-		BundleRootPath: "testdata",
+		BundleRootPath: "testdata/basic",
 	}
 	diags := bundle.Apply(context.Background(), b, loader.EntryPoint())
 	require.NoError(t, diags.Error())


@@ -3,12 +3,135 @@ package loader
import (
	"context"
	"fmt"
	"slices"
	"sort"
	"strings"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/dyn"
)

func validateFileFormat(configRoot dyn.Value, filePath string) diag.Diagnostics {
	for _, resourceDescription := range config.SupportedResources() {
		singularName := resourceDescription.SingularName

		for _, yamlExt := range []string{"yml", "yaml"} {
			ext := fmt.Sprintf(".%s.%s", singularName, yamlExt)
			if strings.HasSuffix(filePath, ext) {
				return validateSingleResourceDefined(configRoot, ext, singularName)
			}
		}
	}

	return nil
}
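To make the suffix construction concrete: for the `job` resource type the loop above produces the extensions `.job.yml` and `.job.yaml`. A standalone sketch of the same matching, with the resource name hard-coded instead of coming from `config.SupportedResources()`:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Mirrors the suffix construction in validateFileFormat above.
	for _, yamlExt := range []string{"yml", "yaml"} {
		ext := fmt.Sprintf(".%s.%s", "job", yamlExt)
		fmt.Printf("%s matches %q: %v\n", "my_etl.job.yml", ext, strings.HasSuffix("my_etl.job.yml", ext))
	}
	// Output:
	// my_etl.job.yml matches ".job.yml": true
	// my_etl.job.yml matches ".job.yaml": false
}
```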

func validateSingleResourceDefined(configRoot dyn.Value, ext, typ string) diag.Diagnostics {
	type resource struct {
		path  dyn.Path
		value dyn.Value
		typ   string
		key   string
	}

	resources := []resource{}
	supportedResources := config.SupportedResources()

	// Gather all resources defined in the resources block.
	_, err := dyn.MapByPattern(
		configRoot,
		dyn.NewPattern(dyn.Key("resources"), dyn.AnyKey(), dyn.AnyKey()),
		func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
			// The key for the resource, e.g. "my_job" for jobs.my_job.
			k := p[2].Key()
			// The type of the resource, e.g. "job" for jobs.my_job.
			typ := supportedResources[p[1].Key()].SingularName

			resources = append(resources, resource{path: p, value: v, typ: typ, key: k})
			return v, nil
		})
	if err != nil {
		return diag.FromErr(err)
	}

	// Gather all resources defined in a target block.
	_, err = dyn.MapByPattern(
		configRoot,
		dyn.NewPattern(dyn.Key("targets"), dyn.AnyKey(), dyn.Key("resources"), dyn.AnyKey(), dyn.AnyKey()),
		func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
			// The key for the resource, e.g. "my_job" for jobs.my_job.
			k := p[4].Key()
			// The type of the resource, e.g. "job" for jobs.my_job.
			typ := supportedResources[p[3].Key()].SingularName

			resources = append(resources, resource{path: p, value: v, typ: typ, key: k})
			return v, nil
		})
	if err != nil {
		return diag.FromErr(err)
	}

	typeMatch := true
	seenKeys := map[string]struct{}{}
	for _, rr := range resources {
		// case: The resource is not of the correct type.
		if rr.typ != typ {
			typeMatch = false
			break
		}

		seenKeys[rr.key] = struct{}{}
	}

	// Format matches. There's at most one resource defined in the file.
	// The resource is also of the correct type.
	if typeMatch && len(seenKeys) <= 1 {
		return nil
	}

	detail := strings.Builder{}
	detail.WriteString("The following resources are defined or configured in this file:\n")
	lines := []string{}
	for _, r := range resources {
		lines = append(lines, fmt.Sprintf(" - %s (%s)\n", r.key, r.typ))
	}

	// Sort the lines to print to make the output deterministic.
	sort.Strings(lines)

	// Compact the lines before writing them to the message to remove any duplicate lines.
	// This is needed because we do not dedup earlier when gathering the resources
	// and it's valid to define the same resource in both the resources and targets block.
	lines = slices.Compact(lines)
	for _, l := range lines {
		detail.WriteString(l)
	}

	locations := []dyn.Location{}
	paths := []dyn.Path{}
	for _, rr := range resources {
		locations = append(locations, rr.value.Locations()...)
		paths = append(paths, rr.path)
	}

	// Sort the locations and paths to make the output deterministic.
	sort.Slice(locations, func(i, j int) bool {
		return locations[i].String() < locations[j].String()
	})
	sort.Slice(paths, func(i, j int) bool {
		return paths[i].String() < paths[j].String()
	})

	return diag.Diagnostics{
		{
			Severity:  diag.Recommendation,
			Summary:   fmt.Sprintf("define a single %s in a file with the %s extension.", strings.ReplaceAll(typ, "_", " "), ext),
			Detail:    detail.String(),
			Locations: locations,
			Paths:     paths,
		},
	}
}
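For reference, the recommendation this produces for a file `two_jobs.job.yml` that defines `job1` and `job2` looks roughly like the value below (locations and paths omitted; see the test expectations further down for full examples):

```go
diag.Diagnostics{
	{
		Severity: diag.Recommendation,
		Summary:  "define a single job in a file with the .job.yml extension.",
		Detail: "The following resources are defined or configured in this file:\n" +
			" - job1 (job)\n" +
			" - job2 (job)\n",
	},
}
```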

type processInclude struct {
	fullPath string
	relPath  string
@@ -31,6 +154,13 @@ func (m *processInclude) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
	if diags.HasError() {
		return diags
	}

	// Add any diagnostics associated with the file format.
	diags = append(diags, validateFileFormat(this.Value(), m.relPath)...)
	if diags.HasError() {
		return diags
	}

	err := b.Config.Merge(this)
	if err != nil {
		diags = diags.Extend(diag.FromErr(err))


@@ -8,13 +8,15 @@ import (
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/loader"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
 func TestProcessInclude(t *testing.T) {
 	b := &bundle.Bundle{
-		BundleRootPath: "testdata",
+		BundleRootPath: "testdata/basic",
 		Config: config.Root{
 			Workspace: config.Workspace{
 				Host: "foo",
@@ -33,3 +35,184 @@ func TestProcessInclude(t *testing.T) {
	require.NoError(t, diags.Error())
	assert.Equal(t, "bar", b.Config.Workspace.Host)
}

func TestProcessIncludeFormatMatch(t *testing.T) {
	for _, fileName := range []string{
		"one_job.job.yml",
		"one_pipeline.pipeline.yaml",
		"two_job.yml",
		"job_and_pipeline.yml",
		"multiple_resources.yml",
	} {
		t.Run(fileName, func(t *testing.T) {
			b := &bundle.Bundle{
				BundleRootPath: "testdata/format_match",
				Config: config.Root{
					Bundle: config.Bundle{
						Name: "format_test",
					},
				},
			}

			m := loader.ProcessInclude(filepath.Join(b.BundleRootPath, fileName), fileName)
			diags := bundle.Apply(context.Background(), b, m)
			assert.Empty(t, diags)
		})
	}
}

func TestProcessIncludeFormatNotMatch(t *testing.T) {
	for fileName, expectedDiags := range map[string]diag.Diagnostics{
		"single_job.pipeline.yaml": {
			{
				Severity: diag.Recommendation,
				Summary:  "define a single pipeline in a file with the .pipeline.yaml extension.",
				Detail:   "The following resources are defined or configured in this file:\n - job1 (job)\n",
				Locations: []dyn.Location{
					{File: filepath.FromSlash("testdata/format_not_match/single_job.pipeline.yaml"), Line: 11, Column: 11},
					{File: filepath.FromSlash("testdata/format_not_match/single_job.pipeline.yaml"), Line: 4, Column: 7},
				},
				Paths: []dyn.Path{
					dyn.MustPathFromString("resources.jobs.job1"),
					dyn.MustPathFromString("targets.target1.resources.jobs.job1"),
				},
			},
		},
		"job_and_pipeline.job.yml": {
			{
				Severity: diag.Recommendation,
				Summary:  "define a single job in a file with the .job.yml extension.",
				Detail:   "The following resources are defined or configured in this file:\n - job1 (job)\n - pipeline1 (pipeline)\n",
				Locations: []dyn.Location{
					{File: filepath.FromSlash("testdata/format_not_match/job_and_pipeline.job.yml"), Line: 11, Column: 11},
					{File: filepath.FromSlash("testdata/format_not_match/job_and_pipeline.job.yml"), Line: 4, Column: 7},
				},
				Paths: []dyn.Path{
					dyn.MustPathFromString("resources.pipelines.pipeline1"),
					dyn.MustPathFromString("targets.target1.resources.jobs.job1"),
				},
			},
		},
		"job_and_pipeline.experiment.yml": {
			{
				Severity: diag.Recommendation,
				Summary:  "define a single experiment in a file with the .experiment.yml extension.",
				Detail:   "The following resources are defined or configured in this file:\n - job1 (job)\n - pipeline1 (pipeline)\n",
				Locations: []dyn.Location{
					{File: filepath.FromSlash("testdata/format_not_match/job_and_pipeline.experiment.yml"), Line: 11, Column: 11},
					{File: filepath.FromSlash("testdata/format_not_match/job_and_pipeline.experiment.yml"), Line: 4, Column: 7},
				},
				Paths: []dyn.Path{
					dyn.MustPathFromString("resources.pipelines.pipeline1"),
					dyn.MustPathFromString("targets.target1.resources.jobs.job1"),
				},
			},
		},
		"two_jobs.job.yml": {
			{
				Severity: diag.Recommendation,
				Summary:  "define a single job in a file with the .job.yml extension.",
				Detail:   "The following resources are defined or configured in this file:\n - job1 (job)\n - job2 (job)\n",
				Locations: []dyn.Location{
					{File: filepath.FromSlash("testdata/format_not_match/two_jobs.job.yml"), Line: 4, Column: 7},
					{File: filepath.FromSlash("testdata/format_not_match/two_jobs.job.yml"), Line: 7, Column: 7},
				},
				Paths: []dyn.Path{
					dyn.MustPathFromString("resources.jobs.job1"),
					dyn.MustPathFromString("resources.jobs.job2"),
				},
			},
		},
		"second_job_in_target.job.yml": {
			{
				Severity: diag.Recommendation,
				Summary:  "define a single job in a file with the .job.yml extension.",
				Detail:   "The following resources are defined or configured in this file:\n - job1 (job)\n - job2 (job)\n",
				Locations: []dyn.Location{
					{File: filepath.FromSlash("testdata/format_not_match/second_job_in_target.job.yml"), Line: 11, Column: 11},
					{File: filepath.FromSlash("testdata/format_not_match/second_job_in_target.job.yml"), Line: 4, Column: 7},
				},
				Paths: []dyn.Path{
					dyn.MustPathFromString("resources.jobs.job1"),
					dyn.MustPathFromString("targets.target1.resources.jobs.job2"),
				},
			},
		},
		"two_jobs_in_target.job.yml": {
			{
				Severity: diag.Recommendation,
				Summary:  "define a single job in a file with the .job.yml extension.",
				Detail:   "The following resources are defined or configured in this file:\n - job1 (job)\n - job2 (job)\n",
				Locations: []dyn.Location{
					{File: filepath.FromSlash("testdata/format_not_match/two_jobs_in_target.job.yml"), Line: 6, Column: 11},
					{File: filepath.FromSlash("testdata/format_not_match/two_jobs_in_target.job.yml"), Line: 8, Column: 11},
				},
				Paths: []dyn.Path{
					dyn.MustPathFromString("targets.target1.resources.jobs.job1"),
					dyn.MustPathFromString("targets.target1.resources.jobs.job2"),
				},
			},
		},
		"multiple_resources.model_serving_endpoint.yml": {
			{
				Severity: diag.Recommendation,
				Summary:  "define a single model serving endpoint in a file with the .model_serving_endpoint.yml extension.",
				Detail: `The following resources are defined or configured in this file:
 - experiment1 (experiment)
 - job1 (job)
 - job2 (job)
 - job3 (job)
 - model1 (model)
 - model_serving_endpoint1 (model_serving_endpoint)
 - pipeline1 (pipeline)
 - pipeline2 (pipeline)
 - quality_monitor1 (quality_monitor)
 - registered_model1 (registered_model)
 - schema1 (schema)
`,
				Locations: []dyn.Location{
					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 12, Column: 7},
					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 14, Column: 7},
					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 18, Column: 7},
					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 22, Column: 7},
					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 24, Column: 7},
					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 28, Column: 7},
					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 35, Column: 11},
					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 39, Column: 11},
					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 43, Column: 11},
					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 4, Column: 7},
					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 8, Column: 7},
				},
				Paths: []dyn.Path{
					dyn.MustPathFromString("resources.experiments.experiment1"),
					dyn.MustPathFromString("resources.jobs.job1"),
					dyn.MustPathFromString("resources.jobs.job2"),
					dyn.MustPathFromString("resources.model_serving_endpoints.model_serving_endpoint1"),
					dyn.MustPathFromString("resources.models.model1"),
					dyn.MustPathFromString("resources.pipelines.pipeline1"),
					dyn.MustPathFromString("resources.pipelines.pipeline2"),
					dyn.MustPathFromString("resources.schemas.schema1"),
					dyn.MustPathFromString("targets.target1.resources.jobs.job3"),
					dyn.MustPathFromString("targets.target1.resources.quality_monitors.quality_monitor1"),
					dyn.MustPathFromString("targets.target1.resources.registered_models.registered_model1"),
				},
			},
		},
	} {
		t.Run(fileName, func(t *testing.T) {
			b := &bundle.Bundle{
				BundleRootPath: "testdata/format_not_match",
				Config: config.Root{
					Bundle: config.Bundle{
						Name: "format_test",
					},
				},
			}

			m := loader.ProcessInclude(filepath.Join(b.BundleRootPath, fileName), fileName)
			diags := bundle.Apply(context.Background(), b, m)
			require.Len(t, diags, 1)
			assert.Equal(t, expectedDiags, diags)
		})
	}
}


@@ -0,0 +1,11 @@
resources:
  pipelines:
    pipeline1:
      name: pipeline1

targets:
  target1:
    resources:
      jobs:
        job1:
          name: job1


@@ -0,0 +1,43 @@
resources:
  experiments:
    experiment1:
      name: experiment1

  model_serving_endpoints:
    model_serving_endpoint1:
      name: model_serving_endpoint1

  jobs:
    job1:
      name: job1
    job2:
      name: job2

  models:
    model1:
      name: model1

  pipelines:
    pipeline1:
      name: pipeline1
    pipeline2:
      name: pipeline2

  schemas:
    schema1:
      name: schema1

targets:
  target1:
    resources:
      quality_monitors:
        quality_monitor1:
          baseline_table_name: quality_monitor1

      jobs:
        job3:
          name: job3

      registered_models:
        registered_model1:
          name: registered_model1


@@ -0,0 +1,11 @@
resources:
  jobs:
    job1:
      name: job1

targets:
  target1:
    resources:
      jobs:
        job1:
          description: job1


@@ -0,0 +1,4 @@
resources:
  pipelines:
    pipeline1:
      name: pipeline1


@@ -0,0 +1,7 @@
resources:
  jobs:
    job1:
      name: job1

    job2:
      name: job2


@@ -0,0 +1,11 @@
resources:
  pipelines:
    pipeline1:
      name: pipeline1

targets:
  target1:
    resources:
      jobs:
        job1:
          name: job1


@@ -0,0 +1,11 @@
resources:
  pipelines:
    pipeline1:
      name: pipeline1

targets:
  target1:
    resources:
      jobs:
        job1:
          name: job1


@@ -0,0 +1,43 @@
resources:
  experiments:
    experiment1:
      name: experiment1

  model_serving_endpoints:
    model_serving_endpoint1:
      name: model_serving_endpoint1

  jobs:
    job1:
      name: job1
    job2:
      name: job2

  models:
    model1:
      name: model1

  pipelines:
    pipeline1:
      name: pipeline1
    pipeline2:
      name: pipeline2

  schemas:
    schema1:
      name: schema1

targets:
  target1:
    resources:
      quality_monitors:
        quality_monitor1:
          baseline_table_name: quality_monitor1

      jobs:
        job3:
          name: job3

      registered_models:
        registered_model1:
          name: registered_model1


@@ -0,0 +1,11 @@
resources:
  jobs:
    job1:
      name: job1

targets:
  target1:
    resources:
      jobs:
        job2:
          name: job2


@@ -0,0 +1,11 @@
resources:
  jobs:
    job1:
      name: job1

targets:
  target1:
    resources:
      jobs:
        job1:
          description: job1


@@ -0,0 +1,7 @@
resources:
  jobs:
    job1:
      name: job1

    job2:
      name: job2


@@ -0,0 +1,8 @@
targets:
  target1:
    resources:
      jobs:
        job1:
          description: job1
        job2:
          description: job2


@@ -212,6 +212,15 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
		}
	}

	// Dashboards: Prefix
	for key, dashboard := range r.Dashboards {
		if dashboard == nil || dashboard.CreateDashboardRequest == nil {
			diags = diags.Extend(diag.Errorf("dashboard %s is not defined", key))
			continue
		}
		dashboard.DisplayName = prefix + dashboard.DisplayName
	}

	return diags
}


@@ -0,0 +1,70 @@
package mutator

import (
	"context"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/dyn"
)

type configureDashboardDefaults struct{}

func ConfigureDashboardDefaults() bundle.Mutator {
	return &configureDashboardDefaults{}
}

func (m *configureDashboardDefaults) Name() string {
	return "ConfigureDashboardDefaults"
}

func (m *configureDashboardDefaults) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	var diags diag.Diagnostics

	pattern := dyn.NewPattern(
		dyn.Key("resources"),
		dyn.Key("dashboards"),
		dyn.AnyKey(),
	)

	// Configure defaults for all dashboards.
	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
		return dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
			var err error
			v, err = setIfNotExists(v, dyn.NewPath(dyn.Key("parent_path")), dyn.V(b.Config.Workspace.ResourcePath))
			if err != nil {
				return dyn.InvalidValue, err
			}
			v, err = setIfNotExists(v, dyn.NewPath(dyn.Key("embed_credentials")), dyn.V(false))
			if err != nil {
				return dyn.InvalidValue, err
			}
			return v, nil
		})
	})

	diags = diags.Extend(diag.FromErr(err))
	return diags
}

func setIfNotExists(v dyn.Value, path dyn.Path, defaultValue dyn.Value) (dyn.Value, error) {
	// Get the field at the specified path (if set).
	_, err := dyn.GetByPath(v, path)
	switch {
	case dyn.IsNoSuchKeyError(err):
		// OK, we'll set the default value.
		break
	case dyn.IsCannotTraverseNilError(err):
		// Cannot traverse the value, skip it.
		return v, nil
	case err == nil:
		// The field is set, skip it.
		return v, nil
	default:
		// Return the error.
		return v, err
	}

	// Set the field at the specified path.
	return dyn.SetByPath(v, path, defaultValue)
}
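A small sketch of how `setIfNotExists` behaves on a plain mapping; the values are illustrative:

```go
// exampleSetIfNotExists is a hypothetical helper: "parent_path" is absent in
// the input, so the default is filled in.
func exampleSetIfNotExists() (dyn.Value, error) {
	v := dyn.V(map[string]dyn.Value{
		"display_name": dyn.V("my dashboard"),
	})
	// After this call v contains both display_name and parent_path; calling it
	// again with a different default would leave the existing value untouched.
	return setIfNotExists(v, dyn.NewPath(dyn.Key("parent_path")), dyn.V("/Workspace/Users/someone"))
}
```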


@@ -0,0 +1,130 @@
package mutator_test

import (
	"context"
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/config/mutator"
	"github.com/databricks/cli/bundle/config/resources"
	"github.com/databricks/cli/bundle/internal/bundletest"
	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestConfigureDashboardDefaultsParentPath(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Workspace: config.Workspace{
				ResourcePath: "/foo/bar",
			},
			Resources: config.Resources{
				Dashboards: map[string]*resources.Dashboard{
					"d1": {
						// Empty string is skipped.
						// See below for how it is set.
						CreateDashboardRequest: &dashboards.CreateDashboardRequest{
							ParentPath: "",
						},
					},
					"d2": {
						// Non-empty string is skipped.
						CreateDashboardRequest: &dashboards.CreateDashboardRequest{
							ParentPath: "already-set",
						},
					},
					"d3": {
						// No parent path set.
					},
					"d4": nil,
				},
			},
		},
	}

	// We can't set an empty string in the typed configuration.
	// Do it on the dyn.Value directly.
	bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
		return dyn.Set(v, "resources.dashboards.d1.parent_path", dyn.V(""))
	})

	diags := bundle.Apply(context.Background(), b, mutator.ConfigureDashboardDefaults())
	require.NoError(t, diags.Error())

	var v dyn.Value
	var err error

	// Set to empty string; unchanged.
	v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d1.parent_path")
	if assert.NoError(t, err) {
		assert.Equal(t, "", v.MustString())
	}

	// Set to "already-set"; unchanged.
	v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d2.parent_path")
	if assert.NoError(t, err) {
		assert.Equal(t, "already-set", v.MustString())
	}

	// Not set; now set to the workspace resource path.
	v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d3.parent_path")
	if assert.NoError(t, err) {
		assert.Equal(t, "/foo/bar", v.MustString())
	}

	// No valid dashboard; no change.
	_, err = dyn.Get(b.Config.Value(), "resources.dashboards.d4.parent_path")
	assert.True(t, dyn.IsCannotTraverseNilError(err))
}

func TestConfigureDashboardDefaultsEmbedCredentials(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Resources: config.Resources{
				Dashboards: map[string]*resources.Dashboard{
					"d1": {
						EmbedCredentials: true,
					},
					"d2": {
						EmbedCredentials: false,
					},
					"d3": {
						// No parent path set.
					},
					"d4": nil,
				},
			},
		},
	}

	diags := bundle.Apply(context.Background(), b, mutator.ConfigureDashboardDefaults())
	require.NoError(t, diags.Error())

	var v dyn.Value
	var err error

	// Set to true; still true.
	v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d1.embed_credentials")
	if assert.NoError(t, err) {
		assert.Equal(t, true, v.MustBool())
	}

	// Set to false; still false.
	v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d2.embed_credentials")
	if assert.NoError(t, err) {
		assert.Equal(t, false, v.MustBool())
	}

	// Not set; now false.
	v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d3.embed_credentials")
	if assert.NoError(t, err) {
		assert.Equal(t, false, v.MustBool())
	}

	// No valid dashboard; no change.
	_, err = dyn.Get(b.Config.Value(), "resources.dashboards.d4.embed_credentials")
	assert.True(t, dyn.IsCannotTraverseNilError(err))
}


@@ -0,0 +1,65 @@
package mutator

import (
	"context"
	"net/url"
	"strconv"
	"strings"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/diag"
)

type initializeURLs struct {
}

// InitializeURLs makes sure the URL field of each resource is configured.
// NOTE: since this depends on an extra API call, this mutator adds some extra
// latency. As such, it should only be used when needed.
// This URL field is used for the output of the 'bundle summary' CLI command.
func InitializeURLs() bundle.Mutator {
	return &initializeURLs{}
}

func (m *initializeURLs) Name() string {
	return "InitializeURLs"
}

func (m *initializeURLs) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	workspaceId, err := b.WorkspaceClient().CurrentWorkspaceID(ctx)
	if err != nil {
		return diag.FromErr(err)
	}
	orgId := strconv.FormatInt(workspaceId, 10)
	host := b.WorkspaceClient().Config.CanonicalHostName()
	initializeForWorkspace(b, orgId, host)
	return nil
}

func initializeForWorkspace(b *bundle.Bundle, orgId string, host string) error {
	baseURL, err := url.Parse(host)
	if err != nil {
		return err
	}

	// Add ?o=<workspace id> only if <workspace id> wasn't in the subdomain already.
	// The ?o= is needed when vanity URLs / legacy workspace URLs are used.
	// If it's not needed we prefer to leave it out since these URLs are rather
	// long for most terminals.
	//
	// See https://docs.databricks.com/en/workspace/workspace-details.html for
	// further reading about the '?o=' suffix.
	if !strings.Contains(baseURL.Hostname(), orgId) {
		values := baseURL.Query()
		values.Add("o", orgId)
		baseURL.RawQuery = values.Encode()
	}

	for _, group := range b.Config.Resources.AllResources() {
		for _, r := range group.Resources {
			r.InitializeURL(*baseURL)
		}
	}

	return nil
}


@@ -0,0 +1,140 @@
package mutator

import (
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/config/resources"
	"github.com/databricks/databricks-sdk-go/service/catalog"
	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
	"github.com/databricks/databricks-sdk-go/service/jobs"
	"github.com/databricks/databricks-sdk-go/service/ml"
	"github.com/databricks/databricks-sdk-go/service/pipelines"
	"github.com/databricks/databricks-sdk-go/service/serving"
	"github.com/stretchr/testify/require"
)

func TestInitializeURLs(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Workspace: config.Workspace{
				Host: "https://mycompany.databricks.com/",
			},
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"job1": {
						ID:          "1",
						JobSettings: &jobs.JobSettings{Name: "job1"},
					},
				},
				Pipelines: map[string]*resources.Pipeline{
					"pipeline1": {
						ID:           "3",
						PipelineSpec: &pipelines.PipelineSpec{Name: "pipeline1"},
					},
				},
				Experiments: map[string]*resources.MlflowExperiment{
					"experiment1": {
						ID:         "4",
						Experiment: &ml.Experiment{Name: "experiment1"},
					},
				},
				Models: map[string]*resources.MlflowModel{
					"model1": {
						ID:    "a model uses its name for identifier",
						Model: &ml.Model{Name: "a model uses its name for identifier"},
					},
				},
				ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{
					"servingendpoint1": {
						ID: "my_serving_endpoint",
						CreateServingEndpoint: &serving.CreateServingEndpoint{
							Name: "my_serving_endpoint",
						},
					},
				},
				RegisteredModels: map[string]*resources.RegisteredModel{
					"registeredmodel1": {
						ID: "8",
						CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{
							Name: "my_registered_model",
						},
					},
				},
				QualityMonitors: map[string]*resources.QualityMonitor{
					"qualityMonitor1": {
						CreateMonitor: &catalog.CreateMonitor{
							TableName: "catalog.schema.qualityMonitor1",
						},
					},
				},
				Schemas: map[string]*resources.Schema{
					"schema1": {
						ID: "catalog.schema",
						CreateSchema: &catalog.CreateSchema{
							Name: "schema",
						},
					},
				},
				Clusters: map[string]*resources.Cluster{
					"cluster1": {
						ID: "1017-103929-vlr7jzcf",
						ClusterSpec: &compute.ClusterSpec{
							ClusterName: "cluster1",
						},
					},
				},
				Dashboards: map[string]*resources.Dashboard{
					"dashboard1": {
						ID: "01ef8d56871e1d50ae30ce7375e42478",
						CreateDashboardRequest: &dashboards.CreateDashboardRequest{
							DisplayName: "My special dashboard",
						},
					},
				},
			},
		},
	}

	expectedURLs := map[string]string{
		"job1":             "https://mycompany.databricks.com/jobs/1?o=123456",
		"pipeline1":        "https://mycompany.databricks.com/pipelines/3?o=123456",
		"experiment1":      "https://mycompany.databricks.com/ml/experiments/4?o=123456",
		"model1":           "https://mycompany.databricks.com/ml/models/a%20model%20uses%20its%20name%20for%20identifier?o=123456",
		"servingendpoint1": "https://mycompany.databricks.com/ml/endpoints/my_serving_endpoint?o=123456",
		"registeredmodel1": "https://mycompany.databricks.com/explore/data/models/8?o=123456",
		"qualityMonitor1":  "https://mycompany.databricks.com/explore/data/catalog/schema/qualityMonitor1?o=123456",
		"schema1":          "https://mycompany.databricks.com/explore/data/catalog/schema?o=123456",
		"cluster1":         "https://mycompany.databricks.com/compute/clusters/1017-103929-vlr7jzcf?o=123456",
		"dashboard1":       "https://mycompany.databricks.com/dashboardsv3/01ef8d56871e1d50ae30ce7375e42478/published?o=123456",
	}

	initializeForWorkspace(b, "123456", "https://mycompany.databricks.com/")

	for _, group := range b.Config.Resources.AllResources() {
		for key, r := range group.Resources {
			require.Equal(t, expectedURLs[key], r.GetURL(), "Unexpected URL for "+key)
		}
	}
}

func TestInitializeURLsWithoutOrgId(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"job1": {
						ID:          "1",
						JobSettings: &jobs.JobSettings{Name: "job1"},
					},
				},
			},
		},
	}

	initializeForWorkspace(b, "123456", "https://adb-123456.azuredatabricks.net/")

	require.Equal(t, "https://adb-123456.azuredatabricks.net/jobs/1", b.Config.Resources.Jobs["job1"].URL)
}


@@ -5,8 +5,8 @@ import (
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
-	"github.com/databricks/cli/libs/auth"
 	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/iamutil"
 	"github.com/databricks/cli/libs/tags"
 )
@@ -33,7 +33,7 @@ func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	}
 
 	b.Config.Workspace.CurrentUser = &config.User{
-		ShortName: auth.GetShortUserName(me),
+		ShortName: iamutil.GetShortUserName(me),
 		User:      me,
 	}


@@ -6,9 +6,9 @@ import (
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
-	"github.com/databricks/cli/libs/auth"
 	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/dyn"
+	"github.com/databricks/cli/libs/iamutil"
 	"github.com/databricks/cli/libs/log"
 )
@@ -174,7 +174,7 @@ func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 		transformDevelopmentMode(ctx, b)
 		return diags
 	case config.Production:
-		isPrincipal := auth.IsServicePrincipal(b.Config.Workspace.CurrentUser.UserName)
+		isPrincipal := iamutil.IsServicePrincipal(b.Config.Workspace.CurrentUser.User)
 		return validateProductionMode(ctx, b, isPrincipal)
 	case "":
 		// No action


@@ -14,6 +14,7 @@ import (
	sdkconfig "github.com/databricks/databricks-sdk-go/config"
	"github.com/databricks/databricks-sdk-go/service/catalog"
	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
	"github.com/databricks/databricks-sdk-go/service/iam"
	"github.com/databricks/databricks-sdk-go/service/jobs"
	"github.com/databricks/databricks-sdk-go/service/ml"
@@ -123,6 +124,13 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
			Clusters: map[string]*resources.Cluster{
				"cluster1": {ClusterSpec: &compute.ClusterSpec{ClusterName: "cluster1", SparkVersion: "13.2.x", NumWorkers: 1}},
			},
			Dashboards: map[string]*resources.Dashboard{
				"dashboard1": {
					CreateDashboardRequest: &dashboards.CreateDashboardRequest{
						DisplayName: "dashboard1",
					},
				},
			},
		},
	},
	// Use AWS implementation for testing.
@@ -184,6 +192,9 @@ func TestProcessTargetModeDevelopment(t *testing.T) {
	// Clusters
	assert.Equal(t, "[dev lennart] cluster1", b.Config.Resources.Clusters["cluster1"].ClusterName)

	// Dashboards
	assert.Equal(t, "[dev lennart] dashboard1", b.Config.Resources.Dashboards["dashboard1"].DisplayName)
}

func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) {


@@ -30,50 +30,44 @@ func (m *setRunAs) Name() string {
 	return "SetRunAs"
 }
 
-type errUnsupportedResourceTypeForRunAs struct {
-	resourceType     string
-	resourceLocation dyn.Location
-	currentUser      string
-	runAsUser        string
-}
-
-func (e errUnsupportedResourceTypeForRunAs) Error() string {
-	return fmt.Sprintf("%s are not supported when the current deployment user is different from the bundle's run_as identity. Please deploy as the run_as identity. Please refer to the documentation at https://docs.databricks.com/dev-tools/bundles/run-as.html for more details. Location of the unsupported resource: %s. Current identity: %s. Run as identity: %s", e.resourceType, e.resourceLocation, e.currentUser, e.runAsUser)
-}
-
-type errBothSpAndUserSpecified struct {
-	spName   string
-	spLoc    dyn.Location
-	userName string
-	userLoc  dyn.Location
-}
-
-func (e errBothSpAndUserSpecified) Error() string {
-	return fmt.Sprintf("run_as section must specify exactly one identity. A service_principal_name %q is specified at %s. A user_name %q is defined at %s", e.spName, e.spLoc, e.userName, e.userLoc)
-}
-
-func validateRunAs(b *bundle.Bundle) error {
-	neitherSpecifiedErr := fmt.Errorf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s", b.Config.GetLocation("run_as"))
+func reportRunAsNotSupported(resourceType string, location dyn.Location, currentUser string, runAsUser string) diag.Diagnostics {
+	return diag.Diagnostics{{
+		Summary: fmt.Sprintf("%s do not support setting a run_as user that is different from the owner.\n"+
+			"Current identity: %s. Run as identity: %s.\n"+
+			"See https://docs.databricks.com/dev-tools/bundles/run-as.html to learn more about the run_as property.", resourceType, currentUser, runAsUser),
+		Locations: []dyn.Location{location},
+		Severity:  diag.Error,
+	}}
+}
 
-	// Error if neither service_principal_name nor user_name are specified, but the
+func validateRunAs(b *bundle.Bundle) diag.Diagnostics {
+	diags := diag.Diagnostics{}
+
+	neitherSpecifiedErr := diag.Diagnostics{{
+		Summary:   "run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified",
+		Locations: []dyn.Location{b.Config.GetLocation("run_as")},
+		Severity:  diag.Error,
+	}}
+
+	// Fail fast if neither service_principal_name nor user_name are specified, but the
 	// run_as section is present.
 	if b.Config.Value().Get("run_as").Kind() == dyn.KindNil {
 		return neitherSpecifiedErr
 	}
 
-	// Error if one or both of service_principal_name and user_name are specified,
+	// Fail fast if one or both of service_principal_name and user_name are specified,
 	// but with empty values.
-	if b.Config.RunAs.ServicePrincipalName == "" && b.Config.RunAs.UserName == "" {
+	runAs := b.Config.RunAs
+	if runAs.ServicePrincipalName == "" && runAs.UserName == "" {
 		return neitherSpecifiedErr
 	}
 
-	// Error if both service_principal_name and user_name are specified
-	runAs := b.Config.RunAs
 	if runAs.UserName != "" && runAs.ServicePrincipalName != "" {
-		return errBothSpAndUserSpecified{
-			spName:   runAs.ServicePrincipalName,
-			userName: runAs.UserName,
-			spLoc:    b.Config.GetLocation("run_as.service_principal_name"),
-			userLoc:  b.Config.GetLocation("run_as.user_name"),
-		}
+		diags = diags.Extend(diag.Diagnostics{{
+			Summary:   "run_as section cannot specify both user_name and service_principal_name",
+			Locations: []dyn.Location{b.Config.GetLocation("run_as")},
+			Severity:  diag.Error,
+		}})
 	}
 
 	identity := runAs.ServicePrincipalName
@@ -83,40 +77,50 @@ func validateRunAs(b *bundle.Bundle) error {
 
 	// All resources are supported if the run_as identity is the same as the current deployment identity.
 	if identity == b.Config.Workspace.CurrentUser.UserName {
-		return nil
+		return diags
 	}
 
 	// DLT pipelines do not support run_as in the API.
 	if len(b.Config.Resources.Pipelines) > 0 {
-		return errUnsupportedResourceTypeForRunAs{
-			resourceType:     "pipelines",
-			resourceLocation: b.Config.GetLocation("resources.pipelines"),
-			currentUser:      b.Config.Workspace.CurrentUser.UserName,
-			runAsUser:        identity,
-		}
+		diags = diags.Extend(reportRunAsNotSupported(
+			"pipelines",
+			b.Config.GetLocation("resources.pipelines"),
+			b.Config.Workspace.CurrentUser.UserName,
+			identity,
+		))
 	}
 
 	// Model serving endpoints do not support run_as in the API.
 	if len(b.Config.Resources.ModelServingEndpoints) > 0 {
-		return errUnsupportedResourceTypeForRunAs{
-			resourceType:     "model_serving_endpoints",
-			resourceLocation: b.Config.GetLocation("resources.model_serving_endpoints"),
-			currentUser:      b.Config.Workspace.CurrentUser.UserName,
-			runAsUser:        identity,
-		}
+		diags = diags.Extend(reportRunAsNotSupported(
+			"model_serving_endpoints",
+			b.Config.GetLocation("resources.model_serving_endpoints"),
+			b.Config.Workspace.CurrentUser.UserName,
+			identity,
+		))
 	}
 
 	// Monitors do not support run_as in the API.
 	if len(b.Config.Resources.QualityMonitors) > 0 {
-		return errUnsupportedResourceTypeForRunAs{
-			resourceType:     "quality_monitors",
-			resourceLocation: b.Config.GetLocation("resources.quality_monitors"),
-			currentUser:      b.Config.Workspace.CurrentUser.UserName,
-			runAsUser:        identity,
-		}
+		diags = diags.Extend(reportRunAsNotSupported(
+			"quality_monitors",
+			b.Config.GetLocation("resources.quality_monitors"),
+			b.Config.Workspace.CurrentUser.UserName,
+			identity,
+		))
 	}
 
-	return nil
+	// Dashboards do not support run_as in the API.
+	if len(b.Config.Resources.Dashboards) > 0 {
+		diags = diags.Extend(reportRunAsNotSupported(
+			"dashboards",
+			b.Config.GetLocation("resources.dashboards"),
+			b.Config.Workspace.CurrentUser.UserName,
+			identity,
+		))
+	}
+
+	return diags
 }
 
 func setRunAsForJobs(b *bundle.Bundle) {
@@ -187,8 +191,9 @@ func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
 	}
 
 	// Assert the run_as configuration is valid in the context of the bundle
-	if err := validateRunAs(b); err != nil {
-		return diag.FromErr(err)
+	diags := validateRunAs(b)
+	if diags.HasError() {
+		return diags
 	}
 
 	setRunAsForJobs(b)

@@ -33,6 +33,7 @@ func allResourceTypes(t *testing.T) []string {
	// also update this check when adding a new resource
	require.Equal(t, []string{
		"clusters",
		"dashboards",
		"experiments",
		"jobs",
		"model_serving_endpoints",
@@ -188,11 +189,9 @@ func TestRunAsErrorForUnsupportedResources(t *testing.T) {
 			Config: *r,
 		}
 		diags := bundle.Apply(context.Background(), b, SetRunAs())
-		assert.Equal(t, diags.Error().Error(), errUnsupportedResourceTypeForRunAs{
-			resourceType:     rt,
-			resourceLocation: dyn.Location{},
-			currentUser:      "alice",
-			runAsUser:        "bob",
-		}.Error(), "expected run_as with a different identity than the current deployment user to not be supported for resources of type: %s", rt)
+		require.Error(t, diags.Error())
+		assert.Contains(t, diags.Error().Error(), "do not support setting a run_as user that is different from the owner.\n"+
+			"Current identity: alice. Run as identity: bob.\n"+
+			"See https://docs.databricks.com/dev-tools/bundles/run-as.html to learn more about the run_as property.", rt)
 	}
 }


@@ -162,6 +162,20 @@ func (t *translateContext) translateNoOp(literal, localFullPath, localRelPath, remotePath string) (string, error) {
	return localRelPath, nil
}

func (t *translateContext) retainLocalAbsoluteFilePath(literal, localFullPath, localRelPath, remotePath string) (string, error) {
	info, err := t.b.SyncRoot.Stat(localRelPath)
	if errors.Is(err, fs.ErrNotExist) {
		return "", fmt.Errorf("file %s not found", literal)
	}
	if err != nil {
		return "", fmt.Errorf("unable to determine if %s is a file: %w", localFullPath, err)
	}
	if info.IsDir() {
		return "", fmt.Errorf("expected %s to be a file but found a directory", literal)
	}
	return localFullPath, nil
}

func (t *translateContext) translateNoOpWithPrefix(literal, localFullPath, localRelPath, remotePath string) (string, error) {
	if !strings.HasPrefix(localRelPath, ".") {
		localRelPath = "." + string(filepath.Separator) + localRelPath
@@ -215,6 +229,7 @@ func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
		t.applyJobTranslations,
		t.applyPipelineTranslations,
		t.applyArtifactTranslations,
		t.applyDashboardTranslations,
	} {
		v, err = fn(v)
		if err != nil {


@@ -0,0 +1,28 @@
package mutator

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn"
)

func (t *translateContext) applyDashboardTranslations(v dyn.Value) (dyn.Value, error) {
	// Convert the `file_path` field to a local absolute path.
	// We load the file at this path and use its contents for the dashboard contents.
	pattern := dyn.NewPattern(
		dyn.Key("resources"),
		dyn.Key("dashboards"),
		dyn.AnyKey(),
		dyn.Key("file_path"),
	)

	return dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
		key := p[2].Key()
		dir, err := v.Location().Directory()
		if err != nil {
			return dyn.InvalidValue, fmt.Errorf("unable to determine directory for dashboard %s: %w", key, err)
		}

		return t.rewriteRelativeTo(p, v, t.retainLocalAbsoluteFilePath, dir, "")
	})
}


@ -699,6 +699,9 @@ func TestTranslatePathJobEnvironments(t *testing.T) {
"../dist/env2.whl", "../dist/env2.whl",
"simplejson", "simplejson",
"/Workspace/Users/foo@bar.com/test.whl", "/Workspace/Users/foo@bar.com/test.whl",
"--extra-index-url https://name:token@gitlab.com/api/v4/projects/9876/packages/pypi/simple foobar",
"foobar --extra-index-url https://name:token@gitlab.com/api/v4/projects/9876/packages/pypi/simple",
"https://foo@bar.com/packages/pypi/simple",
},
},
},
@ -719,6 +722,9 @@ func TestTranslatePathJobEnvironments(t *testing.T) {
assert.Equal(t, strings.Join([]string{".", "dist", "env2.whl"}, string(os.PathSeparator)), b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1])
assert.Equal(t, "simplejson", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[2])
assert.Equal(t, "/Workspace/Users/foo@bar.com/test.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[3])
assert.Equal(t, "--extra-index-url https://name:token@gitlab.com/api/v4/projects/9876/packages/pypi/simple foobar", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[4])
assert.Equal(t, "foobar --extra-index-url https://name:token@gitlab.com/api/v4/projects/9876/packages/pypi/simple", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[5])
assert.Equal(t, "https://foo@bar.com/packages/pypi/simple", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[6])
}
func TestTranslatePathWithComplexVariables(t *testing.T) {


@ -3,6 +3,7 @@ package config
import (
"context"
"fmt"
"net/url"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/databricks-sdk-go"
@ -20,6 +21,7 @@ type Resources struct {
QualityMonitors map[string]*resources.QualityMonitor `json:"quality_monitors,omitempty"`
Schemas map[string]*resources.Schema `json:"schemas,omitempty"`
Clusters map[string]*resources.Cluster `json:"clusters,omitempty"`
Dashboards map[string]*resources.Dashboard `json:"dashboards,omitempty"`
}
type ConfigResource interface {
@ -30,6 +32,54 @@ type ConfigResource interface {
// Terraform equivalent name of the resource. For example "databricks_job"
// for jobs and "databricks_pipeline" for pipelines.
TerraformResourceName() string
// GetName returns the in-product name of the resource.
GetName() string
// GetURL returns the URL of the resource.
GetURL() string
// InitializeURL initializes the URL field of the resource.
InitializeURL(baseURL url.URL)
}
// ResourceGroup represents a group of resources of the same type.
// It includes a description of the resource type and a map of resources.
type ResourceGroup struct {
Description ResourceDescription
Resources map[string]ConfigResource
}
// collectResourceMap collects resources of a specific type into a ResourceGroup.
func collectResourceMap[T ConfigResource](
description ResourceDescription,
input map[string]T,
) ResourceGroup {
resources := make(map[string]ConfigResource)
for key, resource := range input {
resources[key] = resource
}
return ResourceGroup{
Description: description,
Resources: resources,
}
}
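The generic collectResourceMap helper is needed because Go map types are invariant: a map[string]*resources.Job cannot be assigned to a map[string]ConfigResource even though *resources.Job satisfies the interface, so every map is copied entry by entry. A minimal illustration (jobs here is a hypothetical map[string]*resources.Job):

// var m map[string]ConfigResource = jobs // does not compile: mismatched map types
m := make(map[string]ConfigResource, len(jobs))
for k, v := range jobs {
	m[k] = v // each *resources.Job converts to the interface one value at a time
}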
// AllResources returns all resources in the bundle grouped by their resource type.
func (r *Resources) AllResources() []ResourceGroup {
descriptions := SupportedResources()
return []ResourceGroup{
collectResourceMap(descriptions["jobs"], r.Jobs),
collectResourceMap(descriptions["pipelines"], r.Pipelines),
collectResourceMap(descriptions["models"], r.Models),
collectResourceMap(descriptions["experiments"], r.Experiments),
collectResourceMap(descriptions["model_serving_endpoints"], r.ModelServingEndpoints),
collectResourceMap(descriptions["registered_models"], r.RegisteredModels),
collectResourceMap(descriptions["quality_monitors"], r.QualityMonitors),
collectResourceMap(descriptions["schemas"], r.Schemas),
collectResourceMap(descriptions["clusters"], r.Clusters),
collectResourceMap(descriptions["dashboards"], r.Dashboards),
}
}
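A sketch of a hypothetical consumer: the grouping lets summary code iterate resource types generically instead of writing one loop per struct field.

// r is a hypothetical *Resources; print a per-type summary.
for _, group := range r.AllResources() {
	if len(group.Resources) == 0 {
		continue
	}
	fmt.Printf("%s: %d\n", group.Description.PluralTitle, len(group.Resources))
	for key, res := range group.Resources {
		fmt.Printf("  %s (%s)\n", key, res.GetName())
	}
}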
func (r *Resources) FindResourceByConfigKey(key string) (ConfigResource, error) {
@ -59,3 +109,79 @@ func (r *Resources) FindResourceByConfigKey(key string) (ConfigResource, error)
return found[0], nil
}
type ResourceDescription struct {
// Singular and plural name when used to refer to the configuration.
SingularName string
PluralName string
// Singular and plural title when used in summaries / terminal UI.
SingularTitle string
PluralTitle string
}
// The keys of the map correspond to the resource keys in the bundle configuration.
func SupportedResources() map[string]ResourceDescription {
return map[string]ResourceDescription{
"jobs": {
SingularName: "job",
PluralName: "jobs",
SingularTitle: "Job",
PluralTitle: "Jobs",
},
"pipelines": {
SingularName: "pipeline",
PluralName: "pipelines",
SingularTitle: "Pipeline",
PluralTitle: "Pipelines",
},
"models": {
SingularName: "model",
PluralName: "models",
SingularTitle: "Model",
PluralTitle: "Models",
},
"experiments": {
SingularName: "experiment",
PluralName: "experiments",
SingularTitle: "Experiment",
PluralTitle: "Experiments",
},
"model_serving_endpoints": {
SingularName: "model_serving_endpoint",
PluralName: "model_serving_endpoints",
SingularTitle: "Model Serving Endpoint",
PluralTitle: "Model Serving Endpoints",
},
"registered_models": {
SingularName: "registered_model",
PluralName: "registered_models",
SingularTitle: "Registered Model",
PluralTitle: "Registered Models",
},
"quality_monitors": {
SingularName: "quality_monitor",
PluralName: "quality_monitors",
SingularTitle: "Quality Monitor",
PluralTitle: "Quality Monitors",
},
"schemas": {
SingularName: "schema",
PluralName: "schemas",
SingularTitle: "Schema",
PluralTitle: "Schemas",
},
"clusters": {
SingularName: "cluster",
PluralName: "clusters",
SingularTitle: "Cluster",
PluralTitle: "Clusters",
},
"dashboards": {
SingularName: "dashboard",
PluralName: "dashboards",
SingularTitle: "Dashboard",
PluralTitle: "Dashboards",
},
}
}
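Hypothetical usage of these descriptions, e.g. for pluralizing terminal output based on a count:

desc := SupportedResources()["jobs"]
title := desc.SingularTitle // "Job"
if count != 1 {
	title = desc.PluralTitle // "Jobs"
}
fmt.Printf("%d %s\n", count, title)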


@ -2,6 +2,8 @@ package resources
import (
"context"
"fmt"
"net/url"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go"
@ -13,6 +15,7 @@ type Cluster struct {
ID string `json:"id,omitempty" bundle:"readonly"`
Permissions []Permission `json:"permissions,omitempty"`
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`
*compute.ClusterSpec
}
@ -37,3 +40,19 @@ func (s *Cluster) Exists(ctx context.Context, w *databricks.WorkspaceClient, id
func (s *Cluster) TerraformResourceName() string {
return "databricks_cluster"
}
func (s *Cluster) InitializeURL(baseURL url.URL) {
if s.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("compute/clusters/%s", s.ID)
s.URL = baseURL.String()
}
func (s *Cluster) GetName() string {
return s.ClusterName
}
func (s *Cluster) GetURL() string {
return s.URL
}
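Note that InitializeURL takes url.URL by value: each resource mutates Path on its own copy, so one parsed workspace host can be reused across all resources. A sketch (the host string and cluster variable are placeholders):

base, err := url.Parse("https://my-workspace.cloud.databricks.com")
if err != nil {
	return err
}
cluster.InitializeURL(*base)  // dereference: the method receives a private copy
fmt.Println(cluster.GetURL()) // e.g. .../compute/clusters/<id> once the ID is set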


@ -0,0 +1,81 @@
package resources
import (
"context"
"fmt"
"net/url"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go"
"github.com/databricks/databricks-sdk-go/marshal"
"github.com/databricks/databricks-sdk-go/service/dashboards"
)
type Dashboard struct {
ID string `json:"id,omitempty" bundle:"readonly"`
Permissions []Permission `json:"permissions,omitempty"`
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`
*dashboards.CreateDashboardRequest
// =========================
// === Additional fields ===
// =========================
// SerializedDashboard holds the contents of the dashboard in serialized JSON form.
// We override the field's type from the SDK struct here to allow for inlining as YAML.
// If the value is a string, it is used as is.
// If it is not a string, its contents are marshalled as JSON.
SerializedDashboard any `json:"serialized_dashboard,omitempty"`
// EmbedCredentials is a flag to indicate if the publisher's credentials should
// be embedded in the published dashboard. These embedded credentials will be used
// to execute the published dashboard's queries.
//
// Defaults to false if not set.
EmbedCredentials bool `json:"embed_credentials,omitempty"`
// FilePath points to the local `.lvdash.json` file containing the dashboard definition.
FilePath string `json:"file_path,omitempty"`
}
func (r *Dashboard) UnmarshalJSON(b []byte) error {
return marshal.Unmarshal(b, r)
}
func (r Dashboard) MarshalJSON() ([]byte, error) {
return marshal.Marshal(r)
}
func (*Dashboard) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) {
_, err := w.Lakeview.Get(ctx, dashboards.GetDashboardRequest{
DashboardId: id,
})
if err != nil {
log.Debugf(ctx, "dashboard %s does not exist", id)
return false, err
}
return true, nil
}
func (*Dashboard) TerraformResourceName() string {
return "databricks_dashboard"
}
func (r *Dashboard) InitializeURL(baseURL url.URL) {
if r.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("dashboardsv3/%s/published", r.ID)
r.URL = baseURL.String()
}
func (r *Dashboard) GetName() string {
return r.DisplayName
}
func (r *Dashboard) GetURL() string {
return r.URL
}
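A sketch of how the SerializedDashboard union described above could be normalized to a string (an illustrative helper, not part of this change; it assumes encoding/json is imported):

func serializedDashboardToString(v any) (string, error) {
	switch s := v.(type) {
	case nil:
		return "", nil
	case string:
		return s, nil // a string value is used as-is
	default:
		buf, err := json.Marshal(s) // anything else is marshalled as JSON
		if err != nil {
			return "", err
		}
		return string(buf), nil
	}
}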


@ -2,6 +2,8 @@ package resources
import (
"context"
"fmt"
"net/url"
"strconv"
"github.com/databricks/cli/libs/log"
@ -14,6 +16,7 @@ type Job struct {
ID string `json:"id,omitempty" bundle:"readonly"`
Permissions []Permission `json:"permissions,omitempty"`
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`
*jobs.JobSettings
}
@ -44,3 +47,19 @@ func (j *Job) Exists(ctx context.Context, w *databricks.WorkspaceClient, id stri
func (j *Job) TerraformResourceName() string {
return "databricks_job"
}
func (j *Job) InitializeURL(baseURL url.URL) {
if j.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("jobs/%s", j.ID)
j.URL = baseURL.String()
}
func (j *Job) GetName() string {
return j.Name
}
func (j *Job) GetURL() string {
return j.URL
}


@ -2,6 +2,8 @@ package resources
import (
"context"
"fmt"
"net/url"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go"
@ -13,6 +15,7 @@ type MlflowExperiment struct {
ID string `json:"id,omitempty" bundle:"readonly"`
Permissions []Permission `json:"permissions,omitempty"`
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`
*ml.Experiment
}
@ -39,3 +42,19 @@ func (s *MlflowExperiment) Exists(ctx context.Context, w *databricks.WorkspaceCl
func (s *MlflowExperiment) TerraformResourceName() string {
return "databricks_mlflow_experiment"
}
func (s *MlflowExperiment) InitializeURL(baseURL url.URL) {
if s.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("ml/experiments/%s", s.ID)
s.URL = baseURL.String()
}
func (s *MlflowExperiment) GetName() string {
return s.Name
}
func (s *MlflowExperiment) GetURL() string {
return s.URL
}


@ -2,6 +2,8 @@ package resources
import (
"context"
"fmt"
"net/url"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go"
@ -13,6 +15,7 @@ type MlflowModel struct {
ID string `json:"id,omitempty" bundle:"readonly"`
Permissions []Permission `json:"permissions,omitempty"`
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`
*ml.Model
}
@ -39,3 +42,19 @@ func (s *MlflowModel) Exists(ctx context.Context, w *databricks.WorkspaceClient,
func (s *MlflowModel) TerraformResourceName() string {
return "databricks_mlflow_model"
}
func (s *MlflowModel) InitializeURL(baseURL url.URL) {
if s.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("ml/models/%s", s.ID)
s.URL = baseURL.String()
}
func (s *MlflowModel) GetName() string {
return s.Name
}
func (s *MlflowModel) GetURL() string {
return s.URL
}


@ -2,6 +2,8 @@ package resources
import (
"context"
"fmt"
"net/url"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go"
@ -23,6 +25,7 @@ type ModelServingEndpoint struct {
Permissions []Permission `json:"permissions,omitempty"`
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`
}
func (s *ModelServingEndpoint) UnmarshalJSON(b []byte) error {
@ -47,3 +50,19 @@ func (s *ModelServingEndpoint) Exists(ctx context.Context, w *databricks.Workspa
func (s *ModelServingEndpoint) TerraformResourceName() string {
return "databricks_model_serving"
}
func (s *ModelServingEndpoint) InitializeURL(baseURL url.URL) {
if s.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("ml/endpoints/%s", s.ID)
s.URL = baseURL.String()
}
func (s *ModelServingEndpoint) GetName() string {
return s.Name
}
func (s *ModelServingEndpoint) GetURL() string {
return s.URL
}


@ -1,5 +1,7 @@
package resources
import "fmt"
// Permission holds the permission level setting for a single principal.
// Multiple of these can be defined on any resource.
type Permission struct {
@ -9,3 +11,19 @@ type Permission struct {
ServicePrincipalName string `json:"service_principal_name,omitempty"`
GroupName string `json:"group_name,omitempty"`
}
func (p Permission) String() string {
if p.UserName != "" {
return fmt.Sprintf("level: %s, user_name: %s", p.Level, p.UserName)
}
if p.ServicePrincipalName != "" {
return fmt.Sprintf("level: %s, service_principal_name: %s", p.Level, p.ServicePrincipalName)
}
if p.GroupName != "" {
return fmt.Sprintf("level: %s, group_name: %s", p.Level, p.GroupName)
}
return fmt.Sprintf("level: %s", p.Level)
}


@ -2,6 +2,8 @@ package resources
import (
"context"
"fmt"
"net/url"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go"
@ -13,6 +15,7 @@ type Pipeline struct {
ID string `json:"id,omitempty" bundle:"readonly"`
Permissions []Permission `json:"permissions,omitempty"`
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`
*pipelines.PipelineSpec
}
@ -39,3 +42,19 @@ func (p *Pipeline) Exists(ctx context.Context, w *databricks.WorkspaceClient, id
func (p *Pipeline) TerraformResourceName() string {
return "databricks_pipeline"
}
func (p *Pipeline) InitializeURL(baseURL url.URL) {
if p.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("pipelines/%s", p.ID)
p.URL = baseURL.String()
}
func (p *Pipeline) GetName() string {
return p.Name
}
func (p *Pipeline) GetURL() string {
return p.URL
}

View File

@ -2,6 +2,9 @@ package resources
import (
"context"
"fmt"
"net/url"
"strings"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go"
@ -20,6 +23,7 @@ type QualityMonitor struct {
ID string `json:"id,omitempty" bundle:"readonly"`
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`
}
func (s *QualityMonitor) UnmarshalJSON(b []byte) error {
@ -44,3 +48,19 @@ func (s *QualityMonitor) Exists(ctx context.Context, w *databricks.WorkspaceClie
func (s *QualityMonitor) TerraformResourceName() string {
return "databricks_quality_monitor"
}
func (s *QualityMonitor) InitializeURL(baseURL url.URL) {
if s.TableName == "" {
return
}
baseURL.Path = fmt.Sprintf("explore/data/%s", strings.ReplaceAll(s.TableName, ".", "/"))
s.URL = baseURL.String()
}
func (s *QualityMonitor) GetName() string {
return s.TableName
}
func (s *QualityMonitor) GetURL() string {
return s.URL
}


@ -2,6 +2,9 @@ package resources
import (
"context"
"fmt"
"net/url"
"strings"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go"
@ -24,6 +27,7 @@ type RegisteredModel struct {
*catalog.CreateRegisteredModelRequest
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`
}
func (s *RegisteredModel) UnmarshalJSON(b []byte) error {
@ -48,3 +52,19 @@ func (s *RegisteredModel) Exists(ctx context.Context, w *databricks.WorkspaceCli
func (s *RegisteredModel) TerraformResourceName() string {
return "databricks_registered_model"
}
func (s *RegisteredModel) InitializeURL(baseURL url.URL) {
if s.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("explore/data/models/%s", strings.ReplaceAll(s.ID, ".", "/"))
s.URL = baseURL.String()
}
func (s *RegisteredModel) GetName() string {
return s.Name
}
func (s *RegisteredModel) GetURL() string {
return s.URL
}


@ -1,6 +1,12 @@
package resources
import (
"context"
"fmt"
"net/url"
"strings"
"github.com/databricks/databricks-sdk-go"
"github.com/databricks/databricks-sdk-go/marshal"
"github.com/databricks/databricks-sdk-go/service/catalog"
)
@ -16,6 +22,31 @@ type Schema struct {
*catalog.CreateSchema
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`
}
func (s *Schema) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) {
return false, fmt.Errorf("schema.Exists() is not supported")
}
func (s *Schema) TerraformResourceName() string {
return "databricks_schema"
}
func (s *Schema) InitializeURL(baseURL url.URL) {
if s.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("explore/data/%s", strings.ReplaceAll(s.ID, ".", "/"))
s.URL = baseURL.String()
}
func (s *Schema) GetURL() string {
return s.URL
}
func (s *Schema) GetName() string {
return s.Name
}
func (s *Schema) UnmarshalJSON(b []byte) error {


@ -3,6 +3,7 @@ package config
import (
"encoding/json"
"reflect"
"strings"
"testing"
"github.com/stretchr/testify/assert"
@ -61,3 +62,38 @@ func TestCustomMarshallerIsImplemented(t *testing.T) {
}, "Resource %s does not have a custom unmarshaller", field.Name) }, "Resource %s does not have a custom unmarshaller", field.Name)
} }
} }
func TestResourcesAllResourcesCompleteness(t *testing.T) {
r := Resources{}
rt := reflect.TypeOf(r)
// Collect the set of included resource types.
var types []string
for _, group := range r.AllResources() {
types = append(types, group.Description.PluralName)
}
for i := 0; i < rt.NumField(); i++ {
field := rt.Field(i)
jsonTag := field.Tag.Get("json")
if idx := strings.Index(jsonTag, ","); idx != -1 {
jsonTag = jsonTag[:idx]
}
assert.Contains(t, types, jsonTag, "Field %s is missing in AllResources", field.Name)
}
}
func TestSupportedResources(t *testing.T) {
// Please add your resource to the SupportedResources() function in resources.go if you add a new resource.
actual := SupportedResources()
typ := reflect.TypeOf(Resources{})
for i := 0; i < typ.NumField(); i++ {
field := typ.Field(i)
jsonTags := strings.Split(field.Tag.Get("json"), ",")
pluralName := jsonTags[0]
assert.Equal(t, actual[pluralName].PluralName, pluralName)
}
}


@ -0,0 +1,100 @@
package validate
import (
"context"
"fmt"
"path"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/paths"
"github.com/databricks/cli/bundle/permissions"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/databricks-sdk-go/apierr"
"github.com/databricks/databricks-sdk-go/service/workspace"
"golang.org/x/sync/errgroup"
)
type folderPermissions struct {
}
// Apply implements bundle.ReadOnlyMutator.
func (f *folderPermissions) Apply(ctx context.Context, b bundle.ReadOnlyBundle) diag.Diagnostics {
if len(b.Config().Permissions) == 0 {
return nil
}
bundlePaths := paths.CollectUniqueWorkspacePathPrefixes(b.Config().Workspace)
var diags diag.Diagnostics
g, ctx := errgroup.WithContext(ctx)
results := make([]diag.Diagnostics, len(bundlePaths))
for i, p := range bundlePaths {
g.Go(func() error {
results[i] = checkFolderPermission(ctx, b, p)
return nil
})
}
if err := g.Wait(); err != nil {
return diag.FromErr(err)
}
for _, r := range results {
diags = diags.Extend(r)
}
return diags
}
func checkFolderPermission(ctx context.Context, b bundle.ReadOnlyBundle, folderPath string) diag.Diagnostics {
w := b.WorkspaceClient().Workspace
obj, err := getClosestExistingObject(ctx, w, folderPath)
if err != nil {
return diag.FromErr(err)
}
objPermissions, err := w.GetPermissions(ctx, workspace.GetWorkspaceObjectPermissionsRequest{
WorkspaceObjectId: fmt.Sprint(obj.ObjectId),
WorkspaceObjectType: "directories",
})
if err != nil {
return diag.FromErr(err)
}
p := permissions.ObjectAclToResourcePermissions(folderPath, objPermissions.AccessControlList)
return p.Compare(b.Config().Permissions)
}
func getClosestExistingObject(ctx context.Context, w workspace.WorkspaceInterface, folderPath string) (*workspace.ObjectInfo, error) {
for {
obj, err := w.GetStatusByPath(ctx, folderPath)
if err == nil {
return obj, nil
}
if !apierr.IsMissing(err) {
return nil, err
}
parent := path.Dir(folderPath)
// If the parent is the same as the current folder, then we have reached the root
if folderPath == parent {
break
}
folderPath = parent
}
return nil, fmt.Errorf("folder %s and its parent folders do not exist", folderPath)
}
// Name implements bundle.ReadOnlyMutator.
func (f *folderPermissions) Name() string {
return "validate:folder_permissions"
}
// ValidateFolderPermissions validates that permissions for the folders in Workspace file system matches
// the permissions in the top-level permissions section of the bundle.
func ValidateFolderPermissions() bundle.ReadOnlyMutator {
return &folderPermissions{}
}
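The loop in getClosestExistingObject always terminates because path.Dir is a fixed point at the root. A standalone demonstration of the parent chain it walks:

package main

import (
	"fmt"
	"path"
)

func main() {
	p := "/Workspace/Users/foo@bar.com/artifacts"
	for {
		fmt.Println(p) // the path itself, then each ancestor, ending at "/"
		parent := path.Dir(p)
		if parent == p { // path.Dir("/") == "/", so the loop always halts
			break
		}
		p = parent
	}
}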


@ -0,0 +1,208 @@
package validate
import (
"context"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/bundle/permissions"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/databricks-sdk-go/apierr"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/workspace"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestFolderPermissionsInheritedWhenRootPathDoesNotExist(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Workspace: config.Workspace{
RootPath: "/Workspace/Users/foo@bar.com",
ArtifactPath: "/Workspace/Users/otherfoo@bar.com/artifacts",
FilePath: "/Workspace/Users/foo@bar.com/files",
StatePath: "/Workspace/Users/foo@bar.com/state",
ResourcePath: "/Workspace/Users/foo@bar.com/resources",
},
Permissions: []resources.Permission{
{Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"},
},
},
}
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockWorkspaceAPI()
api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/otherfoo@bar.com/artifacts").Return(nil, &apierr.APIError{
StatusCode: 404,
ErrorCode: "RESOURCE_DOES_NOT_EXIST",
})
api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/otherfoo@bar.com").Return(nil, &apierr.APIError{
StatusCode: 404,
ErrorCode: "RESOURCE_DOES_NOT_EXIST",
})
api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/foo@bar.com").Return(nil, &apierr.APIError{
StatusCode: 404,
ErrorCode: "RESOURCE_DOES_NOT_EXIST",
})
api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users").Return(nil, &apierr.APIError{
StatusCode: 404,
ErrorCode: "RESOURCE_DOES_NOT_EXIST",
})
api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace").Return(&workspace.ObjectInfo{
ObjectId: 1234,
}, nil)
api.EXPECT().GetPermissions(mock.Anything, workspace.GetWorkspaceObjectPermissionsRequest{
WorkspaceObjectId: "1234",
WorkspaceObjectType: "directories",
}).Return(&workspace.WorkspaceObjectPermissions{
ObjectId: "1234",
AccessControlList: []workspace.WorkspaceObjectAccessControlResponse{
{
UserName: "foo@bar.com",
AllPermissions: []workspace.WorkspaceObjectPermission{
{PermissionLevel: "CAN_MANAGE"},
},
},
},
}, nil)
b.SetWorkpaceClient(m.WorkspaceClient)
rb := bundle.ReadOnly(b)
diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions())
require.Empty(t, diags)
}
func TestValidateFolderPermissionsFailsOnMissingBundlePermission(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Workspace: config.Workspace{
RootPath: "/Workspace/Users/foo@bar.com",
ArtifactPath: "/Workspace/Users/foo@bar.com/artifacts",
FilePath: "/Workspace/Users/foo@bar.com/files",
StatePath: "/Workspace/Users/foo@bar.com/state",
ResourcePath: "/Workspace/Users/foo@bar.com/resources",
},
Permissions: []resources.Permission{
{Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"},
},
},
}
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockWorkspaceAPI()
api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/foo@bar.com").Return(&workspace.ObjectInfo{
ObjectId: 1234,
}, nil)
api.EXPECT().GetPermissions(mock.Anything, workspace.GetWorkspaceObjectPermissionsRequest{
WorkspaceObjectId: "1234",
WorkspaceObjectType: "directories",
}).Return(&workspace.WorkspaceObjectPermissions{
ObjectId: "1234",
AccessControlList: []workspace.WorkspaceObjectAccessControlResponse{
{
UserName: "foo@bar.com",
AllPermissions: []workspace.WorkspaceObjectPermission{
{PermissionLevel: "CAN_MANAGE"},
},
},
{
UserName: "foo2@bar.com",
AllPermissions: []workspace.WorkspaceObjectPermission{
{PermissionLevel: "CAN_MANAGE"},
},
},
},
}, nil)
b.SetWorkpaceClient(m.WorkspaceClient)
rb := bundle.ReadOnly(b)
diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions())
require.Len(t, diags, 1)
require.Equal(t, "untracked permissions apply to target workspace path", diags[0].Summary)
require.Equal(t, diag.Warning, diags[0].Severity)
require.Equal(t, "The following permissions apply to the workspace folder at \"/Workspace/Users/foo@bar.com\" but are not configured in the bundle:\n- level: CAN_MANAGE, user_name: foo2@bar.com\n", diags[0].Detail)
}
func TestValidateFolderPermissionsFailsOnPermissionMismatch(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Workspace: config.Workspace{
RootPath: "/Workspace/Users/foo@bar.com",
ArtifactPath: "/Workspace/Users/foo@bar.com/artifacts",
FilePath: "/Workspace/Users/foo@bar.com/files",
StatePath: "/Workspace/Users/foo@bar.com/state",
ResourcePath: "/Workspace/Users/foo@bar.com/resources",
},
Permissions: []resources.Permission{
{Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"},
},
},
}
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockWorkspaceAPI()
api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/foo@bar.com").Return(&workspace.ObjectInfo{
ObjectId: 1234,
}, nil)
api.EXPECT().GetPermissions(mock.Anything, workspace.GetWorkspaceObjectPermissionsRequest{
WorkspaceObjectId: "1234",
WorkspaceObjectType: "directories",
}).Return(&workspace.WorkspaceObjectPermissions{
ObjectId: "1234",
AccessControlList: []workspace.WorkspaceObjectAccessControlResponse{
{
UserName: "foo2@bar.com",
AllPermissions: []workspace.WorkspaceObjectPermission{
{PermissionLevel: "CAN_MANAGE"},
},
},
},
}, nil)
b.SetWorkpaceClient(m.WorkspaceClient)
rb := bundle.ReadOnly(b)
diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions())
require.Len(t, diags, 1)
require.Equal(t, "untracked permissions apply to target workspace path", diags[0].Summary)
require.Equal(t, diag.Warning, diags[0].Severity)
}
func TestValidateFolderPermissionsFailsOnNoRootFolder(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Workspace: config.Workspace{
RootPath: "/NotExisting",
ArtifactPath: "/NotExisting/artifacts",
FilePath: "/NotExisting/files",
StatePath: "/NotExisting/state",
ResourcePath: "/NotExisting/resources",
},
Permissions: []resources.Permission{
{Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"},
},
},
}
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockWorkspaceAPI()
api.EXPECT().GetStatusByPath(mock.Anything, "/NotExisting").Return(nil, &apierr.APIError{
StatusCode: 404,
ErrorCode: "RESOURCE_DOES_NOT_EXIST",
})
api.EXPECT().GetStatusByPath(mock.Anything, "/").Return(nil, &apierr.APIError{
StatusCode: 404,
ErrorCode: "RESOURCE_DOES_NOT_EXIST",
})
b.SetWorkpaceClient(m.WorkspaceClient)
rb := bundle.ReadOnly(b)
diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions())
require.Len(t, diags, 1)
require.Equal(t, "folder / and its parent folders do not exist", diags[0].Summary)
require.Equal(t, diag.Error, diags[0].Severity)
}


@ -35,6 +35,7 @@ func (v *validate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
FilesToSync(),
ValidateSyncPatterns(),
JobTaskClusterSpec(),
ValidateFolderPermissions(),
))
}


@ -2,9 +2,12 @@ package files
import (
"context"
"errors"
"fmt"
"io/fs"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/permissions"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/log"
@ -35,6 +38,9 @@ func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
b.Files, err = sync.RunOnce(ctx)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
return permissions.ReportPossiblePermissionDenied(ctx, b, b.Config.Workspace.FilePath)
}
return diag.FromErr(err)
}
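This match works only if the sync layer wraps workspace 403s with %w so that errors.Is can see fs.ErrPermission through the chain; a minimal sketch of that contract (the wrapping message is hypothetical):

err := fmt.Errorf("uploading bundle files: %w", fs.ErrPermission)
if errors.Is(err, fs.ErrPermission) {
	// matched through any number of %w wrappers
}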


@ -3,8 +3,10 @@ package lock
import (
"context"
"errors"
"io/fs"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/permissions"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/filer"
"github.com/databricks/cli/libs/locker"
@ -51,12 +53,17 @@ func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
if err != nil {
log.Errorf(ctx, "Failed to acquire deployment lock: %v", err)
if errors.Is(err, fs.ErrPermission) {
return permissions.ReportPossiblePermissionDenied(ctx, b, b.Config.Workspace.StatePath)
}
notExistsError := filer.NoSuchDirectoryError{}
if errors.As(err, &notExistsError) {
// If we get a "doesn't exist" error from the API this indicates
// we either don't have permissions or the path is invalid.
-return diag.Errorf("cannot write to deployment root (this can indicate a previous deploy was done with a different identity): %s", b.Config.Workspace.RootPath)
+return permissions.ReportPossiblePermissionDenied(ctx, b, b.Config.Workspace.StatePath)
}
return diag.FromErr(err)
}


@ -4,6 +4,7 @@ import (
"context" "context"
"github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/permissions"
"github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/log"
"github.com/hashicorp/terraform-exec/tfexec" "github.com/hashicorp/terraform-exec/tfexec"
@ -34,6 +35,10 @@ func (w *apply) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
// Apply terraform according to the computed plan
err := tf.Apply(ctx, tfexec.DirOrPlan(b.Plan.Path))
if err != nil {
diags := permissions.TryExtendTerraformPermissionError(ctx, b, err)
if diags != nil {
return diags
}
return diag.Errorf("terraform apply: %v", err)
}


@ -0,0 +1,117 @@
package terraform
import (
"context"
"fmt"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/dyn"
tfjson "github.com/hashicorp/terraform-json"
)
type dashboardState struct {
Name string
ID string
ETag string
}
func collectDashboardsFromState(ctx context.Context, b *bundle.Bundle) ([]dashboardState, error) {
state, err := ParseResourcesState(ctx, b)
if err != nil && state == nil {
return nil, err
}
var dashboards []dashboardState
for _, resource := range state.Resources {
if resource.Mode != tfjson.ManagedResourceMode {
continue
}
for _, instance := range resource.Instances {
switch resource.Type {
case "databricks_dashboard":
dashboards = append(dashboards, dashboardState{
Name: resource.Name,
ID: instance.Attributes.ID,
ETag: instance.Attributes.ETag,
})
}
}
}
return dashboards, nil
}
type checkDashboardsModifiedRemotely struct {
}
func (l *checkDashboardsModifiedRemotely) Name() string {
return "CheckDashboardsModifiedRemotely"
}
func (l *checkDashboardsModifiedRemotely) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
// This mutator is relevant only if the bundle includes dashboards.
if len(b.Config.Resources.Dashboards) == 0 {
return nil
}
// If the user has forced the deployment, skip this check.
if b.Config.Bundle.Force {
return nil
}
dashboards, err := collectDashboardsFromState(ctx, b)
if err != nil {
return diag.FromErr(err)
}
var diags diag.Diagnostics
for _, dashboard := range dashboards {
// Skip dashboards that are not defined in the bundle.
// These will be destroyed upon deployment.
if _, ok := b.Config.Resources.Dashboards[dashboard.Name]; !ok {
continue
}
path := dyn.MustPathFromString(fmt.Sprintf("resources.dashboards.%s", dashboard.Name))
loc := b.Config.GetLocation(path.String())
actual, err := b.WorkspaceClient().Lakeview.GetByDashboardId(ctx, dashboard.ID)
if err != nil {
diags = diags.Append(diag.Diagnostic{
Severity: diag.Error,
Summary: fmt.Sprintf("failed to get dashboard %q", dashboard.Name),
Detail: err.Error(),
Paths: []dyn.Path{path},
Locations: []dyn.Location{loc},
})
continue
}
// If the ETag is the same, the dashboard has not been modified.
if actual.Etag == dashboard.ETag {
continue
}
diags = diags.Append(diag.Diagnostic{
Severity: diag.Error,
Summary: fmt.Sprintf("dashboard %q has been modified remotely", dashboard.Name),
Detail: "" +
"This dashboard has been modified remotely since the last bundle deployment.\n" +
"These modifications are untracked and will be overwritten on deploy.\n" +
"\n" +
"Make sure that the local dashboard definition matches what you intend to deploy\n" +
"before proceeding with the deployment.\n" +
"\n" +
"Run `databricks bundle deploy --force` to bypass this error." +
"",
Paths: []dyn.Path{path},
Locations: []dyn.Location{loc},
})
}
return diags
}
func CheckDashboardsModifiedRemotely() *checkDashboardsModifiedRemotely {
return &checkDashboardsModifiedRemotely{}
}
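Distilled, the per-dashboard decision above reduces to two checks; a hedged restatement (the function name and parameters are hypothetical):

func shouldFlagDashboard(definedInBundle bool, stateETag, remoteETag string) bool {
	if !definedInBundle {
		return false // not in config: it will be destroyed on deploy anyway
	}
	return stateETag != remoteETag // ETag drift means untracked remote edits
}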


@ -0,0 +1,191 @@
package terraform
import (
"context"
"fmt"
"path/filepath"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/internal/testutil"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/dashboards"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func mockDashboardBundle(t *testing.T) *bundle.Bundle {
dir := t.TempDir()
b := &bundle.Bundle{
BundleRootPath: dir,
Config: config.Root{
Bundle: config.Bundle{
Target: "test",
},
Resources: config.Resources{
Dashboards: map[string]*resources.Dashboard{
"dash1": {
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
DisplayName: "My Special Dashboard",
},
},
},
},
},
}
return b
}
func TestCheckDashboardsModifiedRemotely_NoDashboards(t *testing.T) {
dir := t.TempDir()
b := &bundle.Bundle{
BundleRootPath: dir,
Config: config.Root{
Bundle: config.Bundle{
Target: "test",
},
Resources: config.Resources{},
},
}
diags := bundle.Apply(context.Background(), b, CheckDashboardsModifiedRemotely())
assert.Empty(t, diags)
}
func TestCheckDashboardsModifiedRemotely_FirstDeployment(t *testing.T) {
b := mockDashboardBundle(t)
diags := bundle.Apply(context.Background(), b, CheckDashboardsModifiedRemotely())
assert.Empty(t, diags)
}
func TestCheckDashboardsModifiedRemotely_ExistingStateNoChange(t *testing.T) {
ctx := context.Background()
b := mockDashboardBundle(t)
writeFakeDashboardState(t, ctx, b)
// Mock the call to the API.
m := mocks.NewMockWorkspaceClient(t)
dashboardsAPI := m.GetMockLakeviewAPI()
dashboardsAPI.EXPECT().
GetByDashboardId(mock.Anything, "id1").
Return(&dashboards.Dashboard{
DisplayName: "My Special Dashboard",
Etag: "1000",
}, nil).
Once()
b.SetWorkpaceClient(m.WorkspaceClient)
// No changes, so no diags.
diags := bundle.Apply(ctx, b, CheckDashboardsModifiedRemotely())
assert.Empty(t, diags)
}
func TestCheckDashboardsModifiedRemotely_ExistingStateChange(t *testing.T) {
ctx := context.Background()
b := mockDashboardBundle(t)
writeFakeDashboardState(t, ctx, b)
// Mock the call to the API.
m := mocks.NewMockWorkspaceClient(t)
dashboardsAPI := m.GetMockLakeviewAPI()
dashboardsAPI.EXPECT().
GetByDashboardId(mock.Anything, "id1").
Return(&dashboards.Dashboard{
DisplayName: "My Special Dashboard",
Etag: "1234",
}, nil).
Once()
b.SetWorkpaceClient(m.WorkspaceClient)
// The dashboard has changed, so expect an error.
diags := bundle.Apply(ctx, b, CheckDashboardsModifiedRemotely())
if assert.Len(t, diags, 1) {
assert.Equal(t, diag.Error, diags[0].Severity)
assert.Equal(t, `dashboard "dash1" has been modified remotely`, diags[0].Summary)
}
}
func TestCheckDashboardsModifiedRemotely_ExistingStateFailureToGet(t *testing.T) {
ctx := context.Background()
b := mockDashboardBundle(t)
writeFakeDashboardState(t, ctx, b)
// Mock the call to the API.
m := mocks.NewMockWorkspaceClient(t)
dashboardsAPI := m.GetMockLakeviewAPI()
dashboardsAPI.EXPECT().
GetByDashboardId(mock.Anything, "id1").
Return(nil, fmt.Errorf("failure")).
Once()
b.SetWorkpaceClient(m.WorkspaceClient)
// Unable to get the dashboard, so expect an error.
diags := bundle.Apply(ctx, b, CheckDashboardsModifiedRemotely())
if assert.Len(t, diags, 1) {
assert.Equal(t, diag.Error, diags[0].Severity)
assert.Equal(t, `failed to get dashboard "dash1"`, diags[0].Summary)
}
}
func writeFakeDashboardState(t *testing.T, ctx context.Context, b *bundle.Bundle) {
tfDir, err := Dir(ctx, b)
require.NoError(t, err)
// Write fake state file.
testutil.WriteFile(t, `
{
"version": 4,
"terraform_version": "1.5.5",
"resources": [
{
"mode": "managed",
"type": "databricks_dashboard",
"name": "dash1",
"instances": [
{
"schema_version": 0,
"attributes": {
"etag": "1000",
"id": "id1"
}
}
]
},
{
"mode": "managed",
"type": "databricks_job",
"name": "job",
"instances": [
{
"schema_version": 0,
"attributes": {
"id": "1234"
}
}
]
},
{
"mode": "managed",
"type": "databricks_dashboard",
"name": "dash2",
"instances": [
{
"schema_version": 0,
"attributes": {
"etag": "1001",
"id": "id2"
}
}
]
}
]
}
`, filepath.Join(tfDir, TerraformStateFileName))
}


@ -2,9 +2,7 @@ package terraform
import (
"context"
-"encoding/json"
"fmt"
-"sort"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
@ -14,244 +12,6 @@ import (
tfjson "github.com/hashicorp/terraform-json" tfjson "github.com/hashicorp/terraform-json"
) )
func conv(from any, to any) {
buf, _ := json.Marshal(from)
json.Unmarshal(buf, &to)
}
func convPermissions(acl []resources.Permission) *schema.ResourcePermissions {
if len(acl) == 0 {
return nil
}
resource := schema.ResourcePermissions{}
for _, ac := range acl {
resource.AccessControl = append(resource.AccessControl, convPermission(ac))
}
return &resource
}
func convPermission(ac resources.Permission) schema.ResourcePermissionsAccessControl {
dst := schema.ResourcePermissionsAccessControl{
PermissionLevel: ac.Level,
}
if ac.UserName != "" {
dst.UserName = ac.UserName
}
if ac.GroupName != "" {
dst.GroupName = ac.GroupName
}
if ac.ServicePrincipalName != "" {
dst.ServicePrincipalName = ac.ServicePrincipalName
}
return dst
}
func convGrants(acl []resources.Grant) *schema.ResourceGrants {
if len(acl) == 0 {
return nil
}
resource := schema.ResourceGrants{}
for _, ac := range acl {
resource.Grant = append(resource.Grant, schema.ResourceGrantsGrant{
Privileges: ac.Privileges,
Principal: ac.Principal,
})
}
return &resource
}
// BundleToTerraform converts resources in a bundle configuration
// to the equivalent Terraform JSON representation.
//
// Note: This function is an older implementation of the conversion logic. It is
// no longer used in any code paths. It is kept around to be used in tests.
// New resources do not need to modify this function and can instead define
// the conversion logic in the tfdyn package.
func BundleToTerraform(config *config.Root) *schema.Root {
tfroot := schema.NewRoot()
tfroot.Provider = schema.NewProviders()
tfroot.Resource = schema.NewResources()
noResources := true
for k, src := range config.Resources.Jobs {
noResources = false
var dst schema.ResourceJob
conv(src, &dst)
if src.JobSettings != nil {
sort.Slice(src.JobSettings.Tasks, func(i, j int) bool {
return src.JobSettings.Tasks[i].TaskKey < src.JobSettings.Tasks[j].TaskKey
})
for _, v := range src.Tasks {
var t schema.ResourceJobTask
conv(v, &t)
for _, v_ := range v.Libraries {
var l schema.ResourceJobTaskLibrary
conv(v_, &l)
t.Library = append(t.Library, l)
}
// Convert for_each_task libraries
if v.ForEachTask != nil {
for _, v_ := range v.ForEachTask.Task.Libraries {
var l schema.ResourceJobTaskForEachTaskTaskLibrary
conv(v_, &l)
t.ForEachTask.Task.Library = append(t.ForEachTask.Task.Library, l)
}
}
dst.Task = append(dst.Task, t)
}
for _, v := range src.JobClusters {
var t schema.ResourceJobJobCluster
conv(v, &t)
dst.JobCluster = append(dst.JobCluster, t)
}
// Unblock downstream work. To be addressed more generally later.
if git := src.GitSource; git != nil {
dst.GitSource = &schema.ResourceJobGitSource{
Url: git.GitUrl,
Branch: git.GitBranch,
Commit: git.GitCommit,
Provider: string(git.GitProvider),
Tag: git.GitTag,
}
}
for _, v := range src.Parameters {
var t schema.ResourceJobParameter
conv(v, &t)
dst.Parameter = append(dst.Parameter, t)
}
}
tfroot.Resource.Job[k] = &dst
// Configure permissions for this resource.
if rp := convPermissions(src.Permissions); rp != nil {
rp.JobId = fmt.Sprintf("${databricks_job.%s.id}", k)
tfroot.Resource.Permissions["job_"+k] = rp
}
}
for k, src := range config.Resources.Pipelines {
noResources = false
var dst schema.ResourcePipeline
conv(src, &dst)
if src.PipelineSpec != nil {
for _, v := range src.Libraries {
var l schema.ResourcePipelineLibrary
conv(v, &l)
dst.Library = append(dst.Library, l)
}
for _, v := range src.Clusters {
var l schema.ResourcePipelineCluster
conv(v, &l)
dst.Cluster = append(dst.Cluster, l)
}
for _, v := range src.Notifications {
var l schema.ResourcePipelineNotification
conv(v, &l)
dst.Notification = append(dst.Notification, l)
}
}
tfroot.Resource.Pipeline[k] = &dst
// Configure permissions for this resource.
if rp := convPermissions(src.Permissions); rp != nil {
rp.PipelineId = fmt.Sprintf("${databricks_pipeline.%s.id}", k)
tfroot.Resource.Permissions["pipeline_"+k] = rp
}
}
for k, src := range config.Resources.Models {
noResources = false
var dst schema.ResourceMlflowModel
conv(src, &dst)
tfroot.Resource.MlflowModel[k] = &dst
// Configure permissions for this resource.
if rp := convPermissions(src.Permissions); rp != nil {
rp.RegisteredModelId = fmt.Sprintf("${databricks_mlflow_model.%s.registered_model_id}", k)
tfroot.Resource.Permissions["mlflow_model_"+k] = rp
}
}
for k, src := range config.Resources.Experiments {
noResources = false
var dst schema.ResourceMlflowExperiment
conv(src, &dst)
tfroot.Resource.MlflowExperiment[k] = &dst
// Configure permissions for this resource.
if rp := convPermissions(src.Permissions); rp != nil {
rp.ExperimentId = fmt.Sprintf("${databricks_mlflow_experiment.%s.id}", k)
tfroot.Resource.Permissions["mlflow_experiment_"+k] = rp
}
}
for k, src := range config.Resources.ModelServingEndpoints {
noResources = false
var dst schema.ResourceModelServing
conv(src, &dst)
tfroot.Resource.ModelServing[k] = &dst
// Configure permissions for this resource.
if rp := convPermissions(src.Permissions); rp != nil {
rp.ServingEndpointId = fmt.Sprintf("${databricks_model_serving.%s.serving_endpoint_id}", k)
tfroot.Resource.Permissions["model_serving_"+k] = rp
}
}
for k, src := range config.Resources.RegisteredModels {
noResources = false
var dst schema.ResourceRegisteredModel
conv(src, &dst)
tfroot.Resource.RegisteredModel[k] = &dst
// Configure permissions for this resource.
if rp := convGrants(src.Grants); rp != nil {
rp.Function = fmt.Sprintf("${databricks_registered_model.%s.id}", k)
tfroot.Resource.Grants["registered_model_"+k] = rp
}
}
for k, src := range config.Resources.QualityMonitors {
noResources = false
var dst schema.ResourceQualityMonitor
conv(src, &dst)
tfroot.Resource.QualityMonitor[k] = &dst
}
for k, src := range config.Resources.Clusters {
noResources = false
var dst schema.ResourceCluster
conv(src, &dst)
tfroot.Resource.Cluster[k] = &dst
}
// We explicitly set "resource" to nil to omit it from a JSON encoding.
// This is required because the terraform CLI requires >= 1 resources defined
// if the "resource" property is used in a .tf.json file.
if noResources {
tfroot.Resource = nil
}
return tfroot
}
// BundleToTerraformWithDynValue converts resources in a bundle configuration
// to the equivalent Terraform JSON representation.
func BundleToTerraformWithDynValue(ctx context.Context, root dyn.Value) (*schema.Root, error) {
@ -416,6 +176,16 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error {
}
cur.ID = instance.Attributes.ID
config.Resources.Clusters[resource.Name] = cur
case "databricks_dashboard":
if config.Resources.Dashboards == nil {
config.Resources.Dashboards = make(map[string]*resources.Dashboard)
}
cur := config.Resources.Dashboards[resource.Name]
if cur == nil {
cur = &resources.Dashboard{ModifiedStatus: resources.ModifiedStatusDeleted}
}
cur.ID = instance.Attributes.ID
config.Resources.Dashboards[resource.Name] = cur
case "databricks_permissions": case "databricks_permissions":
case "databricks_grants": case "databricks_grants":
// Ignore; no need to pull these back into the configuration. // Ignore; no need to pull these back into the configuration.
@ -470,6 +240,11 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error {
src.ModifiedStatus = resources.ModifiedStatusCreated
}
}
for _, src := range config.Resources.Dashboards {
if src.ModifiedStatus == "" && src.ID == "" {
src.ModifiedStatus = resources.ModifiedStatusCreated
}
}
return nil
}
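Together, the state-import pass and the final sweep give each dashboard a tri-state status; a sketch of the mapping (assuming ModifiedStatus has a string underlying type, as the constants suggest):

func derivedStatus(inState, inConfig bool) resources.ModifiedStatus {
	switch {
	case inState && !inConfig:
		return resources.ModifiedStatusDeleted // stub injected from state
	case !inState && inConfig:
		return resources.ModifiedStatusCreated // no ID after the state sync
	default:
		return "" // tracked and unchanged: ID set, status left empty
	}
}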


@ -2,7 +2,6 @@ package terraform
import (
"context"
-"encoding/json"
"reflect"
"testing"
@ -13,6 +12,7 @@ import (
"github.com/databricks/cli/libs/dyn/convert" "github.com/databricks/cli/libs/dyn/convert"
"github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/catalog"
"github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/compute"
"github.com/databricks/databricks-sdk-go/service/dashboards"
"github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/databricks/databricks-sdk-go/service/ml" "github.com/databricks/databricks-sdk-go/service/ml"
"github.com/databricks/databricks-sdk-go/service/pipelines" "github.com/databricks/databricks-sdk-go/service/pipelines"
@ -21,6 +21,27 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func produceTerraformConfiguration(t *testing.T, config config.Root) *schema.Root {
vin, err := convert.FromTyped(config, dyn.NilValue)
require.NoError(t, err)
out, err := BundleToTerraformWithDynValue(context.Background(), vin)
require.NoError(t, err)
return out
}
func convertToResourceStruct[T any](t *testing.T, resource *T, data any) {
require.NotNil(t, resource)
require.NotNil(t, data)
// Convert data to a dyn.Value.
vin, err := convert.FromTyped(data, dyn.NilValue)
require.NoError(t, err)
// Convert the dyn.Value to a struct.
err = convert.ToTyped(resource, vin)
require.NoError(t, err)
}
func TestBundleToTerraformJob(t *testing.T) {
var src = resources.Job{
JobSettings: &jobs.JobSettings{
@ -58,8 +79,9 @@ func TestBundleToTerraformJob(t *testing.T) {
},
}
-out := BundleToTerraform(&config)
-resource := out.Resource.Job["my_job"].(*schema.ResourceJob)
+var resource schema.ResourceJob
+out := produceTerraformConfiguration(t, config)
+convertToResourceStruct(t, &resource, out.Resource.Job["my_job"])
assert.Equal(t, "my job", resource.Name)
assert.Len(t, resource.JobCluster, 1)
@ -68,8 +90,6 @@ func TestBundleToTerraformJob(t *testing.T) {
assert.Equal(t, "param1", resource.Parameter[0].Name) assert.Equal(t, "param1", resource.Parameter[0].Name)
assert.Equal(t, "param2", resource.Parameter[1].Name) assert.Equal(t, "param2", resource.Parameter[1].Name)
assert.Nil(t, out.Data) assert.Nil(t, out.Data)
bundleToTerraformEquivalenceTest(t, &config)
} }
func TestBundleToTerraformJobPermissions(t *testing.T) {
@ -90,15 +110,14 @@ func TestBundleToTerraformJobPermissions(t *testing.T) {
},
}
-out := BundleToTerraform(&config)
-resource := out.Resource.Permissions["job_my_job"].(*schema.ResourcePermissions)
+var resource schema.ResourcePermissions
+out := produceTerraformConfiguration(t, config)
+convertToResourceStruct(t, &resource, out.Resource.Permissions["job_my_job"])
assert.NotEmpty(t, resource.JobId)
assert.Len(t, resource.AccessControl, 1)
assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName)
assert.Equal(t, "CAN_VIEW", resource.AccessControl[0].PermissionLevel)
-bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformJobTaskLibraries(t *testing.T) {
@ -128,15 +147,14 @@ func TestBundleToTerraformJobTaskLibraries(t *testing.T) {
},
}
-out := BundleToTerraform(&config)
-resource := out.Resource.Job["my_job"].(*schema.ResourceJob)
+var resource schema.ResourceJob
+out := produceTerraformConfiguration(t, config)
+convertToResourceStruct(t, &resource, out.Resource.Job["my_job"])
assert.Equal(t, "my job", resource.Name)
require.Len(t, resource.Task, 1)
require.Len(t, resource.Task[0].Library, 1)
assert.Equal(t, "mlflow", resource.Task[0].Library[0].Pypi.Package)
-bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformForEachTaskLibraries(t *testing.T) {
@ -172,15 +190,14 @@ func TestBundleToTerraformForEachTaskLibraries(t *testing.T) {
},
}
-out := BundleToTerraform(&config)
-resource := out.Resource.Job["my_job"].(*schema.ResourceJob)
+var resource schema.ResourceJob
+out := produceTerraformConfiguration(t, config)
+convertToResourceStruct(t, &resource, out.Resource.Job["my_job"])
assert.Equal(t, "my job", resource.Name)
require.Len(t, resource.Task, 1)
require.Len(t, resource.Task[0].ForEachTask.Task.Library, 1)
assert.Equal(t, "mlflow", resource.Task[0].ForEachTask.Task.Library[0].Pypi.Package)
-bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformPipeline(t *testing.T) {
@@ -230,8 +247,9 @@ func TestBundleToTerraformPipeline(t *testing.T) {
		},
	}

-	out := BundleToTerraform(&config)
-	resource := out.Resource.Pipeline["my_pipeline"].(*schema.ResourcePipeline)
+	var resource schema.ResourcePipeline
+	out := produceTerraformConfiguration(t, config)
+	convertToResourceStruct(t, &resource, out.Resource.Pipeline["my_pipeline"])

	assert.Equal(t, "my pipeline", resource.Name)
	assert.Len(t, resource.Library, 2)
@@ -241,8 +259,6 @@ func TestBundleToTerraformPipeline(t *testing.T) {
	assert.Equal(t, resource.Notification[1].Alerts, []string{"on-update-failure", "on-flow-failure"})
	assert.Equal(t, resource.Notification[1].EmailRecipients, []string{"jane@doe.com", "john@doe.com"})
	assert.Nil(t, out.Data)
-
-	bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformPipelinePermissions(t *testing.T) {
@@ -263,15 +279,14 @@ func TestBundleToTerraformPipelinePermissions(t *testing.T) {
		},
	}

-	out := BundleToTerraform(&config)
-	resource := out.Resource.Permissions["pipeline_my_pipeline"].(*schema.ResourcePermissions)
+	var resource schema.ResourcePermissions
+	out := produceTerraformConfiguration(t, config)
+	convertToResourceStruct(t, &resource, out.Resource.Permissions["pipeline_my_pipeline"])

	assert.NotEmpty(t, resource.PipelineId)
	assert.Len(t, resource.AccessControl, 1)
	assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName)
	assert.Equal(t, "CAN_VIEW", resource.AccessControl[0].PermissionLevel)
-
-	bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformModel(t *testing.T) {
@@ -300,8 +315,9 @@ func TestBundleToTerraformModel(t *testing.T) {
		},
	}

-	out := BundleToTerraform(&config)
-	resource := out.Resource.MlflowModel["my_model"].(*schema.ResourceMlflowModel)
+	var resource schema.ResourceMlflowModel
+	out := produceTerraformConfiguration(t, config)
+	convertToResourceStruct(t, &resource, out.Resource.MlflowModel["my_model"])

	assert.Equal(t, "name", resource.Name)
	assert.Equal(t, "description", resource.Description)
@@ -311,8 +327,6 @@ func TestBundleToTerraformModel(t *testing.T) {
	assert.Equal(t, "k2", resource.Tags[1].Key)
	assert.Equal(t, "v2", resource.Tags[1].Value)
	assert.Nil(t, out.Data)
-
-	bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformModelPermissions(t *testing.T) {
@@ -336,15 +350,14 @@ func TestBundleToTerraformModelPermissions(t *testing.T) {
		},
	}

-	out := BundleToTerraform(&config)
-	resource := out.Resource.Permissions["mlflow_model_my_model"].(*schema.ResourcePermissions)
+	var resource schema.ResourcePermissions
+	out := produceTerraformConfiguration(t, config)
+	convertToResourceStruct(t, &resource, out.Resource.Permissions["mlflow_model_my_model"])

	assert.NotEmpty(t, resource.RegisteredModelId)
	assert.Len(t, resource.AccessControl, 1)
	assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName)
	assert.Equal(t, "CAN_READ", resource.AccessControl[0].PermissionLevel)
-
-	bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformExperiment(t *testing.T) {
@@ -362,13 +375,12 @@ func TestBundleToTerraformExperiment(t *testing.T) {
		},
	}

-	out := BundleToTerraform(&config)
-	resource := out.Resource.MlflowExperiment["my_experiment"].(*schema.ResourceMlflowExperiment)
+	var resource schema.ResourceMlflowExperiment
+	out := produceTerraformConfiguration(t, config)
+	convertToResourceStruct(t, &resource, out.Resource.MlflowExperiment["my_experiment"])

	assert.Equal(t, "name", resource.Name)
	assert.Nil(t, out.Data)
-
-	bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformExperimentPermissions(t *testing.T) {
@@ -392,15 +404,14 @@ func TestBundleToTerraformExperimentPermissions(t *testing.T) {
		},
	}

-	out := BundleToTerraform(&config)
-	resource := out.Resource.Permissions["mlflow_experiment_my_experiment"].(*schema.ResourcePermissions)
+	var resource schema.ResourcePermissions
+	out := produceTerraformConfiguration(t, config)
+	convertToResourceStruct(t, &resource, out.Resource.Permissions["mlflow_experiment_my_experiment"])

	assert.NotEmpty(t, resource.ExperimentId)
	assert.Len(t, resource.AccessControl, 1)
	assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName)
	assert.Equal(t, "CAN_READ", resource.AccessControl[0].PermissionLevel)
-
-	bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformModelServing(t *testing.T) {
@@ -436,8 +447,9 @@ func TestBundleToTerraformModelServing(t *testing.T) {
		},
	}

-	out := BundleToTerraform(&config)
-	resource := out.Resource.ModelServing["my_model_serving_endpoint"].(*schema.ResourceModelServing)
+	var resource schema.ResourceModelServing
+	out := produceTerraformConfiguration(t, config)
+	convertToResourceStruct(t, &resource, out.Resource.ModelServing["my_model_serving_endpoint"])

	assert.Equal(t, "name", resource.Name)
	assert.Equal(t, "model_name", resource.Config.ServedModels[0].ModelName)
@@ -447,8 +459,6 @@ func TestBundleToTerraformModelServing(t *testing.T) {
	assert.Equal(t, "model_name-1", resource.Config.TrafficConfig.Routes[0].ServedModelName)
	assert.Equal(t, 100, resource.Config.TrafficConfig.Routes[0].TrafficPercentage)
	assert.Nil(t, out.Data)
-
-	bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformModelServingPermissions(t *testing.T) {
@@ -490,15 +500,14 @@ func TestBundleToTerraformModelServingPermissions(t *testing.T) {
		},
	}

-	out := BundleToTerraform(&config)
-	resource := out.Resource.Permissions["model_serving_my_model_serving_endpoint"].(*schema.ResourcePermissions)
+	var resource schema.ResourcePermissions
+	out := produceTerraformConfiguration(t, config)
+	convertToResourceStruct(t, &resource, out.Resource.Permissions["model_serving_my_model_serving_endpoint"])

	assert.NotEmpty(t, resource.ServingEndpointId)
	assert.Len(t, resource.AccessControl, 1)
	assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName)
	assert.Equal(t, "CAN_VIEW", resource.AccessControl[0].PermissionLevel)
-
-	bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformRegisteredModel(t *testing.T) {
@@ -519,16 +528,15 @@ func TestBundleToTerraformRegisteredModel(t *testing.T) {
		},
	}

-	out := BundleToTerraform(&config)
-	resource := out.Resource.RegisteredModel["my_registered_model"].(*schema.ResourceRegisteredModel)
+	var resource schema.ResourceRegisteredModel
+	out := produceTerraformConfiguration(t, config)
+	convertToResourceStruct(t, &resource, out.Resource.RegisteredModel["my_registered_model"])

	assert.Equal(t, "name", resource.Name)
	assert.Equal(t, "catalog", resource.CatalogName)
	assert.Equal(t, "schema", resource.SchemaName)
	assert.Equal(t, "comment", resource.Comment)
	assert.Nil(t, out.Data)
-
-	bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformRegisteredModelGrants(t *testing.T) {
@@ -554,15 +562,14 @@ func TestBundleToTerraformRegisteredModelGrants(t *testing.T) {
		},
	}

-	out := BundleToTerraform(&config)
-	resource := out.Resource.Grants["registered_model_my_registered_model"].(*schema.ResourceGrants)
+	var resource schema.ResourceGrants
+	out := produceTerraformConfiguration(t, config)
+	convertToResourceStruct(t, &resource, out.Resource.Grants["registered_model_my_registered_model"])

	assert.NotEmpty(t, resource.Function)
	assert.Len(t, resource.Grant, 1)
	assert.Equal(t, "jane@doe.com", resource.Grant[0].Principal)
	assert.Equal(t, "EXECUTE", resource.Grant[0].Privileges[0])
-
-	bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformDeletedResources(t *testing.T) {
@@ -671,6 +678,14 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) {
				{Attributes: stateInstanceAttributes{ID: "1"}},
			},
		},
+		{
+			Type: "databricks_dashboard",
+			Mode: "managed",
+			Name: "test_dashboard",
+			Instances: []stateResourceInstance{
+				{Attributes: stateInstanceAttributes{ID: "1"}},
+			},
+		},
	},
}
err := TerraformToBundle(&tfState, &config)
@@ -703,6 +718,9 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) {
	assert.Equal(t, "1", config.Resources.Clusters["test_cluster"].ID)
	assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Clusters["test_cluster"].ModifiedStatus)

+	assert.Equal(t, "1", config.Resources.Dashboards["test_dashboard"].ID)
+	assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Dashboards["test_dashboard"].ModifiedStatus)
+
	AssertFullResourceCoverage(t, &config)
}
@@ -772,6 +790,13 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
			},
		},
	},
+	Dashboards: map[string]*resources.Dashboard{
+		"test_dashboard": {
+			CreateDashboardRequest: &dashboards.CreateDashboardRequest{
+				DisplayName: "test_dashboard",
+			},
+		},
+	},
},
}
var tfState = resourcesState{
@@ -807,6 +832,9 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
	assert.Equal(t, "", config.Resources.Clusters["test_cluster"].ID)
	assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Clusters["test_cluster"].ModifiedStatus)

+	assert.Equal(t, "", config.Resources.Dashboards["test_dashboard"].ID)
+	assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Dashboards["test_dashboard"].ModifiedStatus)
+
	AssertFullResourceCoverage(t, &config)
}
@@ -921,6 +949,18 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
			},
		},
	},
+	Dashboards: map[string]*resources.Dashboard{
+		"test_dashboard": {
+			CreateDashboardRequest: &dashboards.CreateDashboardRequest{
+				DisplayName: "test_dashboard",
+			},
+		},
+		"test_dashboard_new": {
+			CreateDashboardRequest: &dashboards.CreateDashboardRequest{
+				DisplayName: "test_dashboard_new",
+			},
+		},
+	},
},
}
var tfState = resourcesState{
@@ -1069,6 +1109,22 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
				{Attributes: stateInstanceAttributes{ID: "2"}},
			},
		},
+		{
+			Type: "databricks_dashboard",
+			Mode: "managed",
+			Name: "test_dashboard",
+			Instances: []stateResourceInstance{
+				{Attributes: stateInstanceAttributes{ID: "1"}},
+			},
+		},
+		{
+			Type: "databricks_dashboard",
+			Mode: "managed",
+			Name: "test_dashboard_old",
+			Instances: []stateResourceInstance{
+				{Attributes: stateInstanceAttributes{ID: "2"}},
+			},
+		},
	},
}
err := TerraformToBundle(&tfState, &config)
@@ -1137,6 +1193,13 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
	assert.Equal(t, "", config.Resources.Clusters["test_cluster_new"].ID)
	assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Clusters["test_cluster_new"].ModifiedStatus)

+	assert.Equal(t, "1", config.Resources.Dashboards["test_dashboard"].ID)
+	assert.Equal(t, "", config.Resources.Dashboards["test_dashboard"].ModifiedStatus)
+	assert.Equal(t, "2", config.Resources.Dashboards["test_dashboard_old"].ID)
+	assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Dashboards["test_dashboard_old"].ModifiedStatus)
+	assert.Equal(t, "", config.Resources.Dashboards["test_dashboard_new"].ID)
+	assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Dashboards["test_dashboard_new"].ModifiedStatus)
+
	AssertFullResourceCoverage(t, &config)
}
@@ -1154,25 +1217,3 @@ func AssertFullResourceCoverage(t *testing.T, config *config.Root) {
		}
	}
}
-
-func assertEqualTerraformRoot(t *testing.T, a, b *schema.Root) {
-	ba, err := json.Marshal(a)
-	require.NoError(t, err)
-	bb, err := json.Marshal(b)
-	require.NoError(t, err)
-	assert.JSONEq(t, string(ba), string(bb))
-}
-
-func bundleToTerraformEquivalenceTest(t *testing.T, config *config.Root) {
-	t.Run("dyn equivalence", func(t *testing.T) {
-		tf1 := BundleToTerraform(config)
-
-		vin, err := convert.FromTyped(config, dyn.NilValue)
-		require.NoError(t, err)
-		tf2, err := BundleToTerraformWithDynValue(context.Background(), vin)
-		require.NoError(t, err)
-
-		// Compare roots
-		assertEqualTerraformRoot(t, tf1, tf2)
-	})
-}
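Aside: the refactor in this file routes every assertion through the dyn layer instead of type-asserting on BundleToTerraform output. A minimal sketch of the shared helper, reconstructed from the partial hunk at the top of this file; the require.NotNil guards and the produceTerraformConfiguration counterpart are assumptions, since the diff only shows the tail of the helper:

func convertToResourceStruct[T any](t *testing.T, resource *T, data any) {
	require.NotNil(t, resource) // assumed guard; not visible in the hunk above
	require.NotNil(t, data)     // assumed guard; not visible in the hunk above

	// Convert the untyped Terraform resource (map[string]any) to a dyn.Value.
	vin, err := convert.FromTyped(data, dyn.NilValue)
	require.NoError(t, err)

	// Convert the dyn.Value to the typed schema struct.
	err = convert.ToTyped(resource, vin)
	require.NoError(t, err)
}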
View File
@@ -60,6 +60,8 @@ func (m *interpolateMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.D
			path = dyn.NewPath(dyn.Key("databricks_schema")).Append(path[2:]...)
		case dyn.Key("clusters"):
			path = dyn.NewPath(dyn.Key("databricks_cluster")).Append(path[2:]...)
+		case dyn.Key("dashboards"):
+			path = dyn.NewPath(dyn.Key("databricks_dashboard")).Append(path[2:]...)
		default:
			// Trigger "key not found" for unknown resource types.
			return dyn.GetByPath(root, path)
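For illustration, the new case rewrites bundle-level dashboard references into Terraform resource references. A standalone sketch (hypothetical helper, not part of this change) of the string-level transformation:

package main

import (
	"fmt"
	"strings"
)

// rewriteDashboardRef mimics the path rewrite added above: the
// "resources.dashboards" prefix maps to the "databricks_dashboard"
// Terraform resource type; the rest of the reference is kept as-is.
func rewriteDashboardRef(ref string) string {
	const from = "${resources.dashboards."
	if strings.HasPrefix(ref, from) {
		return "${databricks_dashboard." + strings.TrimPrefix(ref, from)
	}
	return ref
}

func main() {
	fmt.Println(rewriteDashboardRef("${resources.dashboards.other_dashboard.id}"))
	// Output: ${databricks_dashboard.other_dashboard.id}
}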
View File
@@ -32,6 +32,7 @@ func TestInterpolate(t *testing.T) {
				"other_registered_model": "${resources.registered_models.other_registered_model.id}",
				"other_schema": "${resources.schemas.other_schema.id}",
				"other_cluster": "${resources.clusters.other_cluster.id}",
+				"other_dashboard": "${resources.dashboards.other_dashboard.id}",
			},
			Tasks: []jobs.Task{
				{
@@ -69,6 +70,7 @@ func TestInterpolate(t *testing.T) {
	assert.Equal(t, "${databricks_registered_model.other_registered_model.id}", j.Tags["other_registered_model"])
	assert.Equal(t, "${databricks_schema.other_schema.id}", j.Tags["other_schema"])
	assert.Equal(t, "${databricks_cluster.other_cluster.id}", j.Tags["other_cluster"])
+	assert.Equal(t, "${databricks_dashboard.other_dashboard.id}", j.Tags["other_dashboard"])

	m := b.Config.Resources.Models["my_model"]
	assert.Equal(t, "my_model", m.Model.Name)
View File
@@ -40,7 +40,7 @@ func (clusterConverter) Convert(ctx context.Context, key string, vin dyn.Value,
	// Configure permissions for this resource.
	if permissions := convertPermissionsResource(ctx, vin); permissions != nil {
-		permissions.JobId = fmt.Sprintf("${databricks_cluster.%s.id}", key)
+		permissions.ClusterId = fmt.Sprintf("${databricks_cluster.%s.id}", key)
		out.Permissions["cluster_"+key] = permissions
	}
View File
@@ -81,7 +81,7 @@ func TestConvertCluster(t *testing.T) {
	// Assert equality on the permissions
	assert.Equal(t, &schema.ResourcePermissions{
-		JobId: "${databricks_cluster.my_cluster.id}",
+		ClusterId: "${databricks_cluster.my_cluster.id}",
		AccessControl: []schema.ResourcePermissionsAccessControl{
			{
				PermissionLevel: "CAN_RUN",
View File
@@ -0,0 +1,109 @@
package tfdyn

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/databricks/cli/bundle/internal/tf/schema"
	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/cli/libs/dyn/convert"
	"github.com/databricks/cli/libs/log"
)

const (
	filePathFieldName            = "file_path"
	serializedDashboardFieldName = "serialized_dashboard"
)

// Marshal "serialized_dashboard" as JSON if it is set in the input but not in the output.
func marshalSerializedDashboard(vin dyn.Value, vout dyn.Value) (dyn.Value, error) {
	// Skip if the "serialized_dashboard" field is already set.
	if v := vout.Get(serializedDashboardFieldName); v.IsValid() {
		return vout, nil
	}

	// Skip if the "serialized_dashboard" field on the input is not set.
	v := vin.Get(serializedDashboardFieldName)
	if !v.IsValid() {
		return vout, nil
	}

	// Marshal the "serialized_dashboard" field as JSON.
	data, err := json.Marshal(v.AsAny())
	if err != nil {
		return dyn.InvalidValue, fmt.Errorf("failed to marshal serialized_dashboard: %w", err)
	}

	// Set the "serialized_dashboard" field on the output.
	return dyn.Set(vout, serializedDashboardFieldName, dyn.V(string(data)))
}

func convertDashboardResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) {
	var err error

	// Normalize the output value to the target schema.
	vout, diags := convert.Normalize(schema.ResourceDashboard{}, vin)
	for _, diag := range diags {
		log.Debugf(ctx, "dashboard normalization diagnostic: %s", diag.Summary)
	}

	// Include "serialized_dashboard" field if "file_path" is set.
	// Note: the Terraform resource supports "file_path" natively, but its
	// change detection mechanism doesn't work as expected at the time of writing (Sep 30).
	if path, ok := vout.Get(filePathFieldName).AsString(); ok {
		vout, err = dyn.Set(vout, serializedDashboardFieldName, dyn.V(fmt.Sprintf("${file(%q)}", path)))
		if err != nil {
			return dyn.InvalidValue, fmt.Errorf("failed to set serialized_dashboard: %w", err)
		}

		// Drop the "file_path" field. It is mutually exclusive with "serialized_dashboard".
		vout, err = dyn.Walk(vout, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
			switch len(p) {
			case 0:
				return v, nil
			case 1:
				if p[0] == dyn.Key(filePathFieldName) {
					return v, dyn.ErrDrop
				}
			}

			// Skip everything else.
			return v, dyn.ErrSkip
		})
		if err != nil {
			return dyn.InvalidValue, fmt.Errorf("failed to drop file_path: %w", err)
		}
	}

	// Marshal "serialized_dashboard" as JSON if it is set in the input but not in the output.
	vout, err = marshalSerializedDashboard(vin, vout)
	if err != nil {
		return dyn.InvalidValue, err
	}

	return vout, nil
}

type dashboardConverter struct{}

func (dashboardConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error {
	vout, err := convertDashboardResource(ctx, vin)
	if err != nil {
		return err
	}

	// Add the converted resource to the output.
	out.Dashboard[key] = vout.AsAny()

	// Configure permissions for this resource.
	if permissions := convertPermissionsResource(ctx, vin); permissions != nil {
		permissions.DashboardId = fmt.Sprintf("${databricks_dashboard.%s.id}", key)
		out.Permissions["dashboard_"+key] = permissions
	}

	return nil
}

func init() {
	registerConverter("dashboards", dashboardConverter{})
}
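Side note on the %q verb used in the file() substitution above: it emits a quoted, escaped Go string, which is also valid Terraform string syntax. A standalone snippet (not part of this change) showing the behavior the Windows-path test below relies on:

package main

import "fmt"

func main() {
	// Backslashes in the path are escaped, so the emitted expression stays parseable.
	fmt.Printf("${file(%q)}\n", `C:\foo\bar\baz\dashboard.lvdash.json`)
	// Output: ${file("C:\\foo\\bar\\baz\\dashboard.lvdash.json")}
}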
View File
@@ -0,0 +1,153 @@
package tfdyn

import (
	"context"
	"testing"

	"github.com/databricks/cli/bundle/config/resources"
	"github.com/databricks/cli/bundle/internal/tf/schema"
	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/cli/libs/dyn/convert"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestConvertDashboard(t *testing.T) {
	var src = resources.Dashboard{
		CreateDashboardRequest: &dashboards.CreateDashboardRequest{
			DisplayName: "my dashboard",
			WarehouseId: "f00dcafe",
			ParentPath:  "/some/path",
		},
		EmbedCredentials: true,
		Permissions: []resources.Permission{
			{
				Level:    "CAN_VIEW",
				UserName: "jane@doe.com",
			},
		},
	}

	vin, err := convert.FromTyped(src, dyn.NilValue)
	require.NoError(t, err)

	ctx := context.Background()
	out := schema.NewResources()
	err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
	require.NoError(t, err)

	// Assert equality on the dashboard
	assert.Equal(t, map[string]any{
		"display_name":      "my dashboard",
		"warehouse_id":      "f00dcafe",
		"parent_path":       "/some/path",
		"embed_credentials": true,
	}, out.Dashboard["my_dashboard"])

	// Assert equality on the permissions
	assert.Equal(t, &schema.ResourcePermissions{
		DashboardId: "${databricks_dashboard.my_dashboard.id}",
		AccessControl: []schema.ResourcePermissionsAccessControl{
			{
				PermissionLevel: "CAN_VIEW",
				UserName:        "jane@doe.com",
			},
		},
	}, out.Permissions["dashboard_my_dashboard"])
}

func TestConvertDashboardFilePath(t *testing.T) {
	var src = resources.Dashboard{
		FilePath: "some/path",
	}

	vin, err := convert.FromTyped(src, dyn.NilValue)
	require.NoError(t, err)

	ctx := context.Background()
	out := schema.NewResources()
	err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
	require.NoError(t, err)

	// Assert that the "serialized_dashboard" is included.
	assert.Subset(t, out.Dashboard["my_dashboard"], map[string]any{
		"serialized_dashboard": "${file(\"some/path\")}",
	})

	// Assert that the "file_path" doesn't carry over.
	assert.NotSubset(t, out.Dashboard["my_dashboard"], map[string]any{
		"file_path": "some/path",
	})
}

func TestConvertDashboardFilePathQuoted(t *testing.T) {
	var src = resources.Dashboard{
		FilePath: `C:\foo\bar\baz\dashboard.lvdash.json`,
	}

	vin, err := convert.FromTyped(src, dyn.NilValue)
	require.NoError(t, err)

	ctx := context.Background()
	out := schema.NewResources()
	err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
	require.NoError(t, err)

	// Assert that the "serialized_dashboard" is included.
	assert.Subset(t, out.Dashboard["my_dashboard"], map[string]any{
		"serialized_dashboard": `${file("C:\\foo\\bar\\baz\\dashboard.lvdash.json")}`,
	})

	// Assert that the "file_path" doesn't carry over.
	assert.NotSubset(t, out.Dashboard["my_dashboard"], map[string]any{
		"file_path": `C:\foo\bar\baz\dashboard.lvdash.json`,
	})
}

func TestConvertDashboardSerializedDashboardString(t *testing.T) {
	var src = resources.Dashboard{
		SerializedDashboard: `{ "json": true }`,
	}

	vin, err := convert.FromTyped(src, dyn.NilValue)
	require.NoError(t, err)

	ctx := context.Background()
	out := schema.NewResources()
	err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
	require.NoError(t, err)

	// Assert that the "serialized_dashboard" is included.
	assert.Subset(t, out.Dashboard["my_dashboard"], map[string]any{
		"serialized_dashboard": `{ "json": true }`,
	})
}

func TestConvertDashboardSerializedDashboardAny(t *testing.T) {
	var src = resources.Dashboard{
		SerializedDashboard: map[string]any{
			"pages": []map[string]any{
				{
					"displayName": "New Page",
					"layout":      []map[string]any{},
				},
			},
		},
	}

	vin, err := convert.FromTyped(src, dyn.NilValue)
	require.NoError(t, err)

	ctx := context.Background()
	out := schema.NewResources()
	err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
	require.NoError(t, err)

	// Assert that the "serialized_dashboard" is included.
	assert.Subset(t, out.Dashboard["my_dashboard"], map[string]any{
		"serialized_dashboard": `{"pages":[{"displayName":"New Page","layout":[]}]}`,
	})
}
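The expected string in the last test follows directly from encoding/json: marshaling the map collapses whitespace while preserving the nested structure. A standalone check (not part of this change):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	v := map[string]any{
		"pages": []map[string]any{
			{"displayName": "New Page", "layout": []map[string]any{}},
		},
	}
	data, err := json.Marshal(v)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
	// Output: {"pages":[{"displayName":"New Page","layout":[]}]}
}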
View File
@@ -13,7 +13,7 @@ import (

// Partial representation of the Terraform state file format.
// We are only interested in the global version and serial numbers,
-// plus resource types, names, modes, and ids.
+// plus resource types, names, modes, IDs, and ETags (for dashboards).
type resourcesState struct {
	Version   int             `json:"version"`
	Resources []stateResource `json:"resources"`
@@ -34,6 +34,7 @@ type stateResourceInstance struct {

type stateInstanceAttributes struct {
	ID   string `json:"id"`
+	ETag string `json:"etag,omitempty"`
}

func ParseResourcesState(ctx context.Context, b *bundle.Bundle) (*resourcesState, error) {
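For reference, the new ETag field simply rides along when a state instance is decoded. A minimal sketch with a hypothetical JSON fixture (the struct mirrors the one above):

package main

import (
	"encoding/json"
	"fmt"
)

type stateInstanceAttributes struct {
	ID   string `json:"id"`
	ETag string `json:"etag,omitempty"`
}

func main() {
	// Hypothetical fixture; dashboards are the resources expected to carry an etag.
	raw := []byte(`{"id":"1","etag":"abc123"}`)
	var attrs stateInstanceAttributes
	if err := json.Unmarshal(raw, &attrs); err != nil {
		panic(err)
	}
	fmt.Println(attrs.ID, attrs.ETag)
	// Output: 1 abc123
}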
View File
@@ -0,0 +1,20 @@
package bundletest

import (
	"context"
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/dyn"
	"github.com/stretchr/testify/require"
)

func Mutate(t *testing.T, b *bundle.Bundle, f func(v dyn.Value) (dyn.Value, error)) {
	diags := bundle.ApplyFunc(context.Background(), b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
		err := b.Config.Mutate(f)
		require.NoError(t, err)
		return nil
	})
	require.NoError(t, diags.Error())
}
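A hypothetical usage sketch for the helper above; the import path for the bundletest package is assumed, and the dyn.Walk callback is an identity traversal here (a real test would rewrite specific nodes):

package bundletest_test

import (
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/internal/bundletest" // assumed package path
	"github.com/databricks/cli/libs/dyn"
)

func TestExampleMutate(t *testing.T) {
	b := &bundle.Bundle{}

	// Walk the dynamic configuration tree without modifying it.
	bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
		return dyn.Walk(v, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
			return v, nil
		})
	})
}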
View File
@@ -8,8 +8,10 @@ import (
	"reflect"

	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/resources"
	"github.com/databricks/cli/bundle/config/variable"
	"github.com/databricks/cli/libs/jsonschema"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
)

func interpolationPattern(s string) string {
@@ -66,6 +68,31 @@ func addInterpolationPatterns(typ reflect.Type, s jsonschema.Schema) jsonschema.
	}
}

+func removeJobsFields(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema {
+	switch typ {
+	case reflect.TypeOf(resources.Job{}):
+		// This field has been deprecated in jobs API v2.1 and is always set to
+		// "MULTI_TASK" in the backend. We should not expose it to the user.
+		delete(s.Properties, "format")
+
+		// These fields are only meant to be set by the DABs client (ie the CLI)
+		// and thus should not be exposed to the user. These are used to annotate
+		// jobs that were created by DABs.
+		delete(s.Properties, "deployment")
+		delete(s.Properties, "edit_mode")
+
+	case reflect.TypeOf(jobs.GitSource{}):
+		// These fields are readonly and are not meant to be set by the user.
+		delete(s.Properties, "job_source")
+		delete(s.Properties, "git_snapshot")
+
+	default:
+		// Do nothing
+	}
+
+	return s
+}
+
func main() {
	if len(os.Args) != 2 {
		fmt.Println("Usage: go run main.go <output-file>")
@@ -90,6 +117,7 @@ func main() {
	s, err := jsonschema.FromType(reflect.TypeOf(config.Root{}), []func(reflect.Type, jsonschema.Schema) jsonschema.Schema{
		p.addDescriptions,
		p.addEnums,
+		removeJobsFields,
		addInterpolationPatterns,
	})
	if err != nil {
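The hook pipeline above has a simple contract: each function receives the Go type being schematized plus the draft schema, and returns a possibly modified schema. A trimmed-down standalone sketch (hypothetical no-op hook; not part of this change):

package main

import (
	"fmt"
	"reflect"

	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/libs/jsonschema"
)

// noop is a do-nothing transform, showing the hook signature.
func noop(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema {
	return s
}

func main() {
	s, err := jsonschema.FromType(reflect.TypeOf(config.Root{}), []func(reflect.Type, jsonschema.Schema) jsonschema.Schema{
		noop,
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("generated schema with %d top-level properties\n", len(s.Properties))
}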
View File
@@ -0,0 +1,4 @@
resources:
  jobs:
    foo:
      format: SINGLE_TASK
View File
@@ -0,0 +1,6 @@
resources:
  jobs:
    foo:
      deployment:
        kind: BUNDLE
        metadata_file_path: /a/b/c
View File
@@ -0,0 +1,6 @@
targets:
  foo:
    resources:
      jobs:
        bar:
          edit_mode: whatever
View File
@@ -0,0 +1,8 @@
resources:
  jobs:
    foo:
      git_source:
        git_provider: GITHUB
        git_url: www.whatever.com
        git_snapshot:
          used_commit: abcdef
View File
@@ -0,0 +1,9 @@
resources:
  jobs:
    foo:
      git_source:
        git_provider: GITHUB
        git_url: www.whatever.com
        job_source:
          import_from_git_branch: master
          job_config_path: def
View File
@@ -32,7 +32,6 @@ resources:
      name: myjob
      continuous:
        pause_status: PAUSED
-     edit_mode: EDITABLE
      max_concurrent_runs: 10
      description: "my job description"
      email_notifications:
@@ -43,10 +42,12 @@ resources:
          dependencies:
            - python=3.7
          client: "myclient"
-     format: MULTI_TASK
      tags:
        foo: bar
        bar: baz
+     git_source:
+       git_provider: gitHub
+       git_url: www.github.com/a/b
      tasks:
        - task_key: mytask
          notebook_task:
View File
@@ -1,3 +1,3 @@
package schema

-const ProviderVersion = "1.52.0"
+const ProviderVersion = "1.54.0"
View File
@@ -10,6 +10,7 @@ type DataSourceCurrentMetastoreMetastoreInfo struct {
	DeltaSharingOrganizationName string `json:"delta_sharing_organization_name,omitempty"`
	DeltaSharingRecipientTokenLifetimeInSeconds int `json:"delta_sharing_recipient_token_lifetime_in_seconds,omitempty"`
	DeltaSharingScope string `json:"delta_sharing_scope,omitempty"`
+	ExternalAccessEnabled bool `json:"external_access_enabled,omitempty"`
	GlobalMetastoreId string `json:"global_metastore_id,omitempty"`
	MetastoreId string `json:"metastore_id,omitempty"`
	Name string `json:"name,omitempty"`
View File
@@ -10,6 +10,7 @@ type DataSourceMetastoreMetastoreInfo struct {
	DeltaSharingOrganizationName string `json:"delta_sharing_organization_name,omitempty"`
	DeltaSharingRecipientTokenLifetimeInSeconds int `json:"delta_sharing_recipient_token_lifetime_in_seconds,omitempty"`
	DeltaSharingScope string `json:"delta_sharing_scope,omitempty"`
+	ExternalAccessEnabled bool `json:"external_access_enabled,omitempty"`
	GlobalMetastoreId string `json:"global_metastore_id,omitempty"`
	MetastoreId string `json:"metastore_id,omitempty"`
	Name string `json:"name,omitempty"`
View File
@@ -0,0 +1,8 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.

package schema

type DataSourceMlflowModels struct {
	Id    string   `json:"id,omitempty"`
	Names []string `json:"names,omitempty"`
}
View File
@@ -0,0 +1,15 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.

package schema

type DataSourceNotificationDestinationsNotificationDestinations struct {
	DestinationType string `json:"destination_type,omitempty"`
	DisplayName     string `json:"display_name,omitempty"`
	Id              string `json:"id,omitempty"`
}

type DataSourceNotificationDestinations struct {
	DisplayNameContains      string `json:"display_name_contains,omitempty"`
	Type                     string `json:"type,omitempty"`
	NotificationDestinations []DataSourceNotificationDestinationsNotificationDestinations `json:"notification_destinations,omitempty"`
}
View File
@@ -0,0 +1,32 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.

package schema

type DataSourceRegisteredModelModelInfoAliases struct {
	AliasName  string `json:"alias_name,omitempty"`
	VersionNum int    `json:"version_num,omitempty"`
}

type DataSourceRegisteredModelModelInfo struct {
	BrowseOnly      bool   `json:"browse_only,omitempty"`
	CatalogName     string `json:"catalog_name,omitempty"`
	Comment         string `json:"comment,omitempty"`
	CreatedAt       int    `json:"created_at,omitempty"`
	CreatedBy       string `json:"created_by,omitempty"`
	FullName        string `json:"full_name,omitempty"`
	MetastoreId     string `json:"metastore_id,omitempty"`
	Name            string `json:"name,omitempty"`
	Owner           string `json:"owner,omitempty"`
	SchemaName      string `json:"schema_name,omitempty"`
	StorageLocation string `json:"storage_location,omitempty"`
	UpdatedAt       int    `json:"updated_at,omitempty"`
	UpdatedBy       string `json:"updated_by,omitempty"`
	Aliases         []DataSourceRegisteredModelModelInfoAliases `json:"aliases,omitempty"`
}

type DataSourceRegisteredModel struct {
	FullName       string `json:"full_name"`
	IncludeAliases bool   `json:"include_aliases,omitempty"`
	IncludeBrowse  bool   `json:"include_browse,omitempty"`
	ModelInfo      []DataSourceRegisteredModelModelInfo `json:"model_info,omitempty"`
}
View File
@@ -30,12 +30,15 @@ type DataSources struct {
	Metastores map[string]any `json:"databricks_metastores,omitempty"`
	MlflowExperiment map[string]any `json:"databricks_mlflow_experiment,omitempty"`
	MlflowModel map[string]any `json:"databricks_mlflow_model,omitempty"`
+	MlflowModels map[string]any `json:"databricks_mlflow_models,omitempty"`
	MwsCredentials map[string]any `json:"databricks_mws_credentials,omitempty"`
	MwsWorkspaces map[string]any `json:"databricks_mws_workspaces,omitempty"`
	NodeType map[string]any `json:"databricks_node_type,omitempty"`
	Notebook map[string]any `json:"databricks_notebook,omitempty"`
	NotebookPaths map[string]any `json:"databricks_notebook_paths,omitempty"`
+	NotificationDestinations map[string]any `json:"databricks_notification_destinations,omitempty"`
	Pipelines map[string]any `json:"databricks_pipelines,omitempty"`
+	RegisteredModel map[string]any `json:"databricks_registered_model,omitempty"`
	Schema map[string]any `json:"databricks_schema,omitempty"`
	Schemas map[string]any `json:"databricks_schemas,omitempty"`
	ServicePrincipal map[string]any `json:"databricks_service_principal,omitempty"`
@@ -85,12 +88,15 @@ func NewDataSources() *DataSources {
		Metastores: make(map[string]any),
		MlflowExperiment: make(map[string]any),
		MlflowModel: make(map[string]any),
+		MlflowModels: make(map[string]any),
		MwsCredentials: make(map[string]any),
		MwsWorkspaces: make(map[string]any),
		NodeType: make(map[string]any),
		Notebook: make(map[string]any),
		NotebookPaths: make(map[string]any),
+		NotificationDestinations: make(map[string]any),
		Pipelines: make(map[string]any),
+		RegisteredModel: make(map[string]any),
		Schema: make(map[string]any),
		Schemas: make(map[string]any),
		ServicePrincipal: make(map[string]any),
View File
@@ -0,0 +1,49 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.

package schema

type ResourceBudgetAlertConfigurationsActionConfigurations struct {
	ActionConfigurationId string `json:"action_configuration_id,omitempty"`
	ActionType            string `json:"action_type,omitempty"`
	Target                string `json:"target,omitempty"`
}

type ResourceBudgetAlertConfigurations struct {
	AlertConfigurationId string `json:"alert_configuration_id,omitempty"`
	QuantityThreshold    string `json:"quantity_threshold,omitempty"`
	QuantityType         string `json:"quantity_type,omitempty"`
	TimePeriod           string `json:"time_period,omitempty"`
	TriggerType          string `json:"trigger_type,omitempty"`
	ActionConfigurations []ResourceBudgetAlertConfigurationsActionConfigurations `json:"action_configurations,omitempty"`
}

type ResourceBudgetFilterTagsValue struct {
	Operator string   `json:"operator,omitempty"`
	Values   []string `json:"values,omitempty"`
}

type ResourceBudgetFilterTags struct {
	Key   string                         `json:"key,omitempty"`
	Value *ResourceBudgetFilterTagsValue `json:"value,omitempty"`
}

type ResourceBudgetFilterWorkspaceId struct {
	Operator string `json:"operator,omitempty"`
	Values   []int  `json:"values,omitempty"`
}

type ResourceBudgetFilter struct {
	Tags        []ResourceBudgetFilterTags       `json:"tags,omitempty"`
	WorkspaceId *ResourceBudgetFilterWorkspaceId `json:"workspace_id,omitempty"`
}

type ResourceBudget struct {
	AccountId             string `json:"account_id,omitempty"`
	BudgetConfigurationId string `json:"budget_configuration_id,omitempty"`
	CreateTime            int    `json:"create_time,omitempty"`
	DisplayName           string `json:"display_name,omitempty"`
	Id                    string `json:"id,omitempty"`
	UpdateTime            int    `json:"update_time,omitempty"`
	AlertConfigurations   []ResourceBudgetAlertConfigurations `json:"alert_configurations,omitempty"`
	Filter                *ResourceBudgetFilter               `json:"filter,omitempty"`
}
View File
@@ -1448,6 +1448,7 @@ type ResourceJobWebhookNotifications struct {

type ResourceJob struct {
	AlwaysRunning bool `json:"always_running,omitempty"`
+	BudgetPolicyId string `json:"budget_policy_id,omitempty"`
	ControlRunState bool `json:"control_run_state,omitempty"`
	Description string `json:"description,omitempty"`
	EditMode string `json:"edit_mode,omitempty"`
View File
@@ -2,6 +2,57 @@

package schema

+type ResourceModelServingAiGatewayGuardrailsInputPii struct {
+	Behavior string `json:"behavior"`
+}
+
+type ResourceModelServingAiGatewayGuardrailsInput struct {
+	InvalidKeywords []string `json:"invalid_keywords,omitempty"`
+	Safety          bool     `json:"safety,omitempty"`
+	ValidTopics     []string `json:"valid_topics,omitempty"`
+	Pii             *ResourceModelServingAiGatewayGuardrailsInputPii `json:"pii,omitempty"`
+}
+
+type ResourceModelServingAiGatewayGuardrailsOutputPii struct {
+	Behavior string `json:"behavior"`
+}
+
+type ResourceModelServingAiGatewayGuardrailsOutput struct {
+	InvalidKeywords []string `json:"invalid_keywords,omitempty"`
+	Safety          bool     `json:"safety,omitempty"`
+	ValidTopics     []string `json:"valid_topics,omitempty"`
+	Pii             *ResourceModelServingAiGatewayGuardrailsOutputPii `json:"pii,omitempty"`
+}
+
+type ResourceModelServingAiGatewayGuardrails struct {
+	Input  *ResourceModelServingAiGatewayGuardrailsInput  `json:"input,omitempty"`
+	Output *ResourceModelServingAiGatewayGuardrailsOutput `json:"output,omitempty"`
+}
+
+type ResourceModelServingAiGatewayInferenceTableConfig struct {
+	CatalogName     string `json:"catalog_name,omitempty"`
+	Enabled         bool   `json:"enabled,omitempty"`
+	SchemaName      string `json:"schema_name,omitempty"`
+	TableNamePrefix string `json:"table_name_prefix,omitempty"`
+}
+
+type ResourceModelServingAiGatewayRateLimits struct {
+	Calls         int    `json:"calls"`
+	Key           string `json:"key,omitempty"`
+	RenewalPeriod string `json:"renewal_period"`
+}
+
+type ResourceModelServingAiGatewayUsageTrackingConfig struct {
+	Enabled bool `json:"enabled,omitempty"`
+}
+
+type ResourceModelServingAiGateway struct {
+	Guardrails           *ResourceModelServingAiGatewayGuardrails           `json:"guardrails,omitempty"`
+	InferenceTableConfig *ResourceModelServingAiGatewayInferenceTableConfig `json:"inference_table_config,omitempty"`
+	RateLimits           []ResourceModelServingAiGatewayRateLimits          `json:"rate_limits,omitempty"`
+	UsageTrackingConfig  *ResourceModelServingAiGatewayUsageTrackingConfig  `json:"usage_tracking_config,omitempty"`
+}
+
type ResourceModelServingConfigAutoCaptureConfig struct {
	CatalogName string `json:"catalog_name,omitempty"`
	Enabled bool `json:"enabled,omitempty"`
@@ -139,6 +190,7 @@ type ResourceModelServing struct {
	Name string `json:"name"`
	RouteOptimized bool `json:"route_optimized,omitempty"`
	ServingEndpointId string `json:"serving_endpoint_id,omitempty"`
+	AiGateway *ResourceModelServingAiGateway `json:"ai_gateway,omitempty"`
	Config *ResourceModelServingConfig `json:"config,omitempty"`
	RateLimits []ResourceModelServingRateLimits `json:"rate_limits,omitempty"`
	Tags []ResourceModelServingTags `json:"tags,omitempty"`
View File
@@ -23,5 +23,6 @@ type ResourceOnlineTable struct {
	Name string `json:"name"`
	Status []any `json:"status,omitempty"`
	TableServingUrl string `json:"table_serving_url,omitempty"`
+	UnityCatalogProvisioningState string `json:"unity_catalog_provisioning_state,omitempty"`
	Spec *ResourceOnlineTableSpec `json:"spec,omitempty"`
}
View File
@@ -4,7 +4,7 @@ package schema

type ResourcePermissionsAccessControl struct {
	GroupName string `json:"group_name,omitempty"`
-	PermissionLevel string `json:"permission_level"`
+	PermissionLevel string `json:"permission_level,omitempty"`
	ServicePrincipalName string `json:"service_principal_name,omitempty"`
	UserName string `json:"user_name,omitempty"`
}
View File
@@ -142,10 +142,26 @@ type ResourcePipelineGatewayDefinition struct {
	GatewayStorageSchema string `json:"gateway_storage_schema,omitempty"`
}

+type ResourcePipelineIngestionDefinitionObjectsReportTableConfiguration struct {
+	PrimaryKeys                    []string `json:"primary_keys,omitempty"`
+	SalesforceIncludeFormulaFields bool     `json:"salesforce_include_formula_fields,omitempty"`
+	ScdType                        string   `json:"scd_type,omitempty"`
+	SequenceBy                     []string `json:"sequence_by,omitempty"`
+}
+
+type ResourcePipelineIngestionDefinitionObjectsReport struct {
+	DestinationCatalog string `json:"destination_catalog,omitempty"`
+	DestinationSchema  string `json:"destination_schema,omitempty"`
+	DestinationTable   string `json:"destination_table,omitempty"`
+	SourceUrl          string `json:"source_url,omitempty"`
+	TableConfiguration *ResourcePipelineIngestionDefinitionObjectsReportTableConfiguration `json:"table_configuration,omitempty"`
+}
+
type ResourcePipelineIngestionDefinitionObjectsSchemaTableConfiguration struct {
	PrimaryKeys []string `json:"primary_keys,omitempty"`
	SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"`
	ScdType string `json:"scd_type,omitempty"`
+	SequenceBy []string `json:"sequence_by,omitempty"`
}

type ResourcePipelineIngestionDefinitionObjectsSchema struct {
@@ -160,6 +176,7 @@ type ResourcePipelineIngestionDefinitionObjectsTableTableConfiguration struct {
	PrimaryKeys []string `json:"primary_keys,omitempty"`
	SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"`
	ScdType string `json:"scd_type,omitempty"`
+	SequenceBy []string `json:"sequence_by,omitempty"`
}

type ResourcePipelineIngestionDefinitionObjectsTable struct {
@@ -173,6 +190,7 @@ type ResourcePipelineIngestionDefinitionObjectsTable struct {
}

type ResourcePipelineIngestionDefinitionObjects struct {
+	Report *ResourcePipelineIngestionDefinitionObjectsReport `json:"report,omitempty"`
	Schema *ResourcePipelineIngestionDefinitionObjectsSchema `json:"schema,omitempty"`
	Table *ResourcePipelineIngestionDefinitionObjectsTable `json:"table,omitempty"`
}
@@ -181,6 +199,7 @@ type ResourcePipelineIngestionDefinitionTableConfiguration struct {
	PrimaryKeys []string `json:"primary_keys,omitempty"`
	SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"`
	ScdType string `json:"scd_type,omitempty"`
+	SequenceBy []string `json:"sequence_by,omitempty"`
}

type ResourcePipelineIngestionDefinition struct {
@@ -238,6 +257,7 @@ type ResourcePipelineTrigger struct {

type ResourcePipeline struct {
	AllowDuplicateNames bool `json:"allow_duplicate_names,omitempty"`
+	BudgetPolicyId string `json:"budget_policy_id,omitempty"`
	Catalog string `json:"catalog,omitempty"`
	Cause string `json:"cause,omitempty"`
	Channel string `json:"channel,omitempty"`
@@ -254,6 +274,7 @@ type ResourcePipeline struct {
	Name string `json:"name,omitempty"`
	Photon bool `json:"photon,omitempty"`
	RunAsUserName string `json:"run_as_user_name,omitempty"`
+	Schema string `json:"schema,omitempty"`
	Serverless bool `json:"serverless,omitempty"`
	State string `json:"state,omitempty"`
	Storage string `json:"storage,omitempty"`
View File
@@ -4,9 +4,11 @@ package schema

type ResourceSqlTableColumn struct {
	Comment string `json:"comment,omitempty"`
+	Identity string `json:"identity,omitempty"`
	Name string `json:"name"`
	Nullable bool `json:"nullable,omitempty"`
	Type string `json:"type,omitempty"`
+	TypeJson string `json:"type_json,omitempty"`
}

type ResourceSqlTable struct {
View File
@@ -10,6 +10,7 @@ type Resources struct {
	AzureAdlsGen1Mount map[string]any `json:"databricks_azure_adls_gen1_mount,omitempty"`
	AzureAdlsGen2Mount map[string]any `json:"databricks_azure_adls_gen2_mount,omitempty"`
	AzureBlobMount map[string]any `json:"databricks_azure_blob_mount,omitempty"`
+	Budget map[string]any `json:"databricks_budget,omitempty"`
	Catalog map[string]any `json:"databricks_catalog,omitempty"`
	CatalogWorkspaceBinding map[string]any `json:"databricks_catalog_workspace_binding,omitempty"`
	Cluster map[string]any `json:"databricks_cluster,omitempty"`
@@ -112,6 +113,7 @@ func NewResources() *Resources {
		AzureAdlsGen1Mount: make(map[string]any),
		AzureAdlsGen2Mount: make(map[string]any),
		AzureBlobMount: make(map[string]any),
+		Budget: make(map[string]any),
		Catalog: make(map[string]any),
		CatalogWorkspaceBinding: make(map[string]any),
		Cluster: make(map[string]any),
View File
@@ -21,7 +21,7 @@ type Root struct {

const ProviderHost = "registry.terraform.io"
const ProviderSource = "databricks/databricks"
-const ProviderVersion = "1.52.0"
+const ProviderVersion = "1.54.0"

func NewRoot() *Root {
	return &Root{
View File
@@ -57,6 +57,12 @@ func IsLibraryLocal(dep string) bool {
		}
	}

+	// If the dependency starts with --, it's a pip flag option which is a valid
+	// entry for environment dependencies but not a local path
+	if containsPipFlag(dep) {
+		return false
+	}
+
	// If the dependency is a requirements file, it's not a valid local path
	if strings.HasPrefix(dep, "-r") {
		return false
@@ -70,6 +76,11 @@ func IsLibraryLocal(dep string) bool {
	return IsLocalPath(dep)
}

+func containsPipFlag(input string) bool {
+	re := regexp.MustCompile(`--[a-zA-Z0-9-]+`)
+	return re.MatchString(input)
+}
+
// ^[a-zA-Z0-9\-_]+: Matches the package name, allowing alphanumeric characters, dashes (-), and underscores (_).
// \[.*\])?: Optionally matches any extras specified in square brackets, e.g., [security].
// ((==|!=|<=|>=|~=|>|<)\d+(\.\d+){0,2}(\.\*)?): Optionally matches version specifiers, supporting various operators (==, !=, etc.) followed by a version number (e.g., 2.25.1).
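The pip-flag check added above hinges on a small regular expression; a standalone demonstration with hypothetical inputs (not from the test suite):

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as containsPipFlag above: a double dash followed by flag characters.
var pipFlag = regexp.MustCompile(`--[a-zA-Z0-9-]+`)

func main() {
	for _, dep := range []string{
		"--extra-index-url https://example.com/simple", // pip flag: not a local path
		"./dist/my_wheel-0.1-py3-none-any.whl",         // plain local path
	} {
		fmt.Printf("%-46s => %v\n", dep, pipFlag.MatchString(dep))
	}
}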
View File
@@ -36,3 +36,13 @@ func IsWorkspaceLibrary(library *compute.Library) bool {

	return IsWorkspacePath(path)
}
+
+// IsVolumesPath returns true if the specified path indicates that
+// it should be interpreted as a Databricks Volumes path.
+func IsVolumesPath(path string) bool {
+	return strings.HasPrefix(path, "/Volumes/")
+}
+
+func IsWorkspaceSharedPath(path string) bool {
+	return strings.HasPrefix(path, "/Workspace/Shared/")
+}
View File
@@ -31,3 +31,13 @@ func TestIsWorkspaceLibrary(t *testing.T) {

	// Empty.
	assert.False(t, IsWorkspaceLibrary(&compute.Library{}))
}
+
+func TestIsVolumesPath(t *testing.T) {
+	// Absolute paths with particular prefixes.
+	assert.True(t, IsVolumesPath("/Volumes/path/to/package"))
+
+	// Relative paths.
+	assert.False(t, IsVolumesPath("myfile.txt"))
+	assert.False(t, IsVolumesPath("./myfile.txt"))
+	assert.False(t, IsVolumesPath("../myfile.txt"))
+}
bundle/paths/paths.go Normal file
View File
@@ -0,0 +1,39 @@
package paths

import (
	"strings"

	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/libraries"
)

func CollectUniqueWorkspacePathPrefixes(workspace config.Workspace) []string {
	rootPath := workspace.RootPath
	paths := []string{}
	if !libraries.IsVolumesPath(rootPath) && !libraries.IsWorkspaceSharedPath(rootPath) {
		paths = append(paths, rootPath)
	}

	if !strings.HasSuffix(rootPath, "/") {
		rootPath += "/"
	}

	for _, p := range []string{
		workspace.ArtifactPath,
		workspace.FilePath,
		workspace.StatePath,
		workspace.ResourcePath,
	} {
		if libraries.IsWorkspaceSharedPath(p) || libraries.IsVolumesPath(p) {
			continue
		}

		if strings.HasPrefix(p, rootPath) {
			continue
		}

		paths = append(paths, p)
	}

	return paths
}
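A hypothetical invocation of the helper above (all paths are illustrative): the artifact and resource paths are dropped because they live under the root, while the Volumes and /Workspace/Shared/ paths are filtered out entirely:

package main

import (
	"fmt"

	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/paths"
)

func main() {
	ws := config.Workspace{
		RootPath:     "/Workspace/Users/jane@doe.com/.bundle/dev",
		ArtifactPath: "/Workspace/Users/jane@doe.com/.bundle/dev/artifacts",
		FilePath:     "/Volumes/main/default/files",
		StatePath:    "/Workspace/Shared/.bundle/dev/state",
		ResourcePath: "/Workspace/Users/jane@doe.com/.bundle/dev/resources",
	}
	fmt.Println(paths.CollectUniqueWorkspacePathPrefixes(ws))
	// Output: [/Workspace/Users/jane@doe.com/.bundle/dev]
}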
View File
@@ -39,6 +39,10 @@ var levelsMap = map[string](map[string]string){
		CAN_VIEW: "CAN_VIEW",
		CAN_RUN:  "CAN_QUERY",
	},
+	"dashboards": {
+		CAN_MANAGE: "CAN_MANAGE",
+		CAN_VIEW:   "CAN_READ",
+	},
}

type bundlePermissions struct{}
View File
@@ -0,0 +1,110 @@
package permissions

import (
	"context"
	"fmt"
	"sort"
	"strings"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/cli/libs/set"
)

type permissionDiagnostics struct{}

func PermissionDiagnostics() bundle.Mutator {
	return &permissionDiagnostics{}
}

func (m *permissionDiagnostics) Name() string {
	return "CheckPermissions"
}

func (m *permissionDiagnostics) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	if len(b.Config.Permissions) == 0 {
		// Only warn if there is an explicit top-level permissions section
		return nil
	}

	canManageBundle, _ := analyzeBundlePermissions(b)
	if canManageBundle {
		return nil
	}

	return diag.Diagnostics{{
		Severity:  diag.Warning,
		Summary:   fmt.Sprintf("permissions section should include %s or one of their groups with CAN_MANAGE permissions", b.Config.Workspace.CurrentUser.UserName),
		Locations: []dyn.Location{b.Config.GetLocation("permissions")},
		ID:        diag.PermissionNotIncluded,
	}}
}

// analyzeBundlePermissions analyzes the top-level permissions of the bundle.
// This permission set is important since it determines the permissions of the
// target workspace folder.
//
// Returns:
// - isManager: true if the current user can manage the bundle resources.
// - assistance: advice on who to contact about managing this project.
func analyzeBundlePermissions(b *bundle.Bundle) (bool, string) {
	canManageBundle := false
	otherManagers := set.NewSet[string]()
	if b.Config.RunAs != nil && b.Config.RunAs.UserName != "" && b.Config.RunAs.UserName != b.Config.Workspace.CurrentUser.UserName {
		// The run_as user is another human that could be contacted
		// about this bundle.
		otherManagers.Add(b.Config.RunAs.UserName)
	}

	currentUser := b.Config.Workspace.CurrentUser.UserName
	targetPermissions := b.Config.Permissions
	for _, p := range targetPermissions {
		if p.Level != CAN_MANAGE {
			continue
		}

		if p.UserName == currentUser || p.ServicePrincipalName == currentUser {
			canManageBundle = true
			continue
		}

		if isGroupOfCurrentUser(b, p.GroupName) {
			canManageBundle = true
			continue
		}

		// Permission doesn't apply to current user; add to otherManagers
		otherManager := p.UserName
		if otherManager == "" {
			otherManager = p.GroupName
		}
		if otherManager == "" {
			// Skip service principals
			continue
		}
		otherManagers.Add(otherManager)
	}

	assistance := "For assistance, contact the owners of this project."
	if otherManagers.Size() > 0 {
		list := otherManagers.Values()
		sort.Strings(list)
		assistance = fmt.Sprintf(
			"For assistance, users or groups with appropriate permissions may include: %s.",
			strings.Join(list, ", "),
		)
	}

	return canManageBundle, assistance
}

func isGroupOfCurrentUser(b *bundle.Bundle, groupName string) bool {
	currentUserGroups := b.Config.Workspace.CurrentUser.User.Groups

	for _, g := range currentUserGroups {
		if g.Display == groupName {
			return true
		}
	}
	return false
}
View File
@@ -0,0 +1,52 @@
package permissions_test

import (
	"context"
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/config/resources"
	"github.com/databricks/cli/bundle/permissions"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/databricks-sdk-go/service/iam"
	"github.com/stretchr/testify/require"
)

func TestPermissionDiagnosticsApplySuccess(t *testing.T) {
	b := mockBundle([]resources.Permission{
		{Level: "CAN_MANAGE", UserName: "testuser@databricks.com"},
	})

	diags := permissions.PermissionDiagnostics().Apply(context.Background(), b)
	require.NoError(t, diags.Error())
}

func TestPermissionDiagnosticsApplyFail(t *testing.T) {
	b := mockBundle([]resources.Permission{
		{Level: "CAN_VIEW", UserName: "testuser@databricks.com"},
	})

	diags := permissions.PermissionDiagnostics().Apply(context.Background(), b)
	require.Equal(t, diags[0].Severity, diag.Warning)
	require.Contains(t, diags[0].Summary, "permissions section should include testuser@databricks.com or one of their groups with CAN_MANAGE permissions")
}

func mockBundle(permissions []resources.Permission) *bundle.Bundle {
	return &bundle.Bundle{
		Config: config.Root{
			Workspace: config.Workspace{
				CurrentUser: &config.User{
					User: &iam.User{
						UserName:    "testuser@databricks.com",
						DisplayName: "Test User",
						Groups: []iam.ComplexValue{
							{Display: "testgroup"},
						},
					},
				},
			},
			Permissions: permissions,
		},
	}
}
Some files were not shown because too many files have changed in this diff.