Shreyas Goenka 2024-10-29 11:38:42 +01:00
commit 8a2fe4969c
No known key found for this signature in database
GPG Key ID: 92A07DF49CCB0622
146 changed files with 6067 additions and 706 deletions

View File

@@ -11,6 +11,7 @@
"toolchain": {
"required": ["go"],
"post_generate": [
"go test -timeout 240s -run TestConsistentDatabricksSdkVersion github.com/databricks/cli/internal/build",
"go run ./bundle/internal/schema/*.go ./bundle/schema/jsonschema.json",
"echo 'bundle/internal/tf/schema/\\*.go linguist-generated=true' >> ./.gitattributes",
"echo 'go.sum linguist-generated=true' >> ./.gitattributes",

View File

@@ -1 +1 @@
0c86ea6dbd9a730c24ff0d4e509603e476955ac5
cf9c61453990df0f9453670f2fe68e1b128647a2

1
.gitattributes vendored
View File

@@ -54,6 +54,7 @@ cmd/workspace/dashboards/dashboards.go linguist-generated=true
cmd/workspace/data-sources/data-sources.go linguist-generated=true
cmd/workspace/default-namespace/default-namespace.go linguist-generated=true
cmd/workspace/disable-legacy-access/disable-legacy-access.go linguist-generated=true
cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs.go linguist-generated=true
cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go linguist-generated=true
cmd/workspace/experiments/experiments.go linguist-generated=true
cmd/workspace/external-locations/external-locations.go linguist-generated=true

60
.github/workflows/integration-tests.yml vendored Normal file
View File

@@ -0,0 +1,60 @@
name: integration
on:
pull_request:
types: [opened, synchronize]
merge_group:
jobs:
trigger-tests:
if: github.event_name == 'pull_request'
runs-on: ubuntu-latest
environment: "test-trigger-is"
steps:
- uses: actions/checkout@v4
- name: Generate GitHub App Token
id: generate-token
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}
private-key: ${{ secrets.DECO_WORKFLOW_TRIGGER_PRIVATE_KEY }}
owner: ${{ secrets.ORG_NAME }}
repositories: ${{ secrets.REPO_NAME }}
- name: Trigger Workflow in Another Repo
env:
GH_TOKEN: ${{ steps.generate-token.outputs.token }}
run: |
gh workflow run cli-isolated-pr.yml -R ${{ secrets.ORG_NAME }}/${{ secrets.REPO_NAME }} \
--ref main \
-f pull_request_number=${{ github.event.pull_request.number }} \
-f commit_sha=${{ github.event.pull_request.head.sha }}
# Statuses and checks apply to specific commits (by hash).
# Enforcement of required checks is done both at the PR level and the merge queue level.
# In case of multiple commits in a single PR, the hash of the squashed commit
# will not match the one for the latest (approved) commit in the PR.
# We auto approve the check for the merge queue for two reasons:
# * The queue would time out due to the duration of the tests.
# * Avoid running the integration tests twice, since they already ran at the tip of the branch before squashing.
auto-approve:
if: github.event_name == 'merge_group'
runs-on: ubuntu-latest
steps:
- name: Mark Check
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
shell: bash
run: |
gh api -X POST -H "Accept: application/vnd.github+json" \
-H "X-GitHub-Api-Version: 2022-11-28" \
/repos/${{ github.repository }}/statuses/${{ github.sha }} \
-f 'state=success' \
-f 'context=Integration Tests Check'

View File

@@ -1,5 +1,43 @@
# Version changelog
## [Release] Release v0.231.0
CLI:
* Added JSON input validation for CLI commands ([#1771](https://github.com/databricks/cli/pull/1771)).
* Support Git worktrees for `sync` ([#1831](https://github.com/databricks/cli/pull/1831)).
Bundles:
* Add `bundle summary` to display URLs for deployed resources ([#1731](https://github.com/databricks/cli/pull/1731)).
* Added a warning when incorrect permissions are used for the `/Workspace/Shared` bundle root ([#1821](https://github.com/databricks/cli/pull/1821)).
* Show actionable errors for collaborative deployment scenarios ([#1386](https://github.com/databricks/cli/pull/1386)).
* Fix path to repository-wide exclude file ([#1837](https://github.com/databricks/cli/pull/1837)).
* Fixed typo in converting cluster permissions ([#1826](https://github.com/databricks/cli/pull/1826)).
* Ignore metastore permission error during template generation ([#1819](https://github.com/databricks/cli/pull/1819)).
* Handle normalization of `dyn.KindTime` into an any type ([#1836](https://github.com/databricks/cli/pull/1836)).
* Added support for pip options in environment dependencies ([#1842](https://github.com/databricks/cli/pull/1842)).
* Fix race condition when restarting continuous jobs ([#1849](https://github.com/databricks/cli/pull/1849)).
* Fix pipeline in default-python template not working for certain workspaces ([#1854](https://github.com/databricks/cli/pull/1854)).
* Add "output" flag to the bundle sync command ([#1853](https://github.com/databricks/cli/pull/1853)).
Internal:
* Move utility functions dealing with IAM to libs/iamutil ([#1820](https://github.com/databricks/cli/pull/1820)).
* Remove unused `IS_OWNER` constant ([#1823](https://github.com/databricks/cli/pull/1823)).
* Assert SDK version is consistent in the CLI generation process ([#1814](https://github.com/databricks/cli/pull/1814)).
* Fixed unmarshalling json input into `interface{}` type ([#1832](https://github.com/databricks/cli/pull/1832)).
* Fix `TestAccFsMkdirWhenFileExistsAtPath` in isolated Azure environments ([#1833](https://github.com/databricks/cli/pull/1833)).
* Add behavioral tests for examples from the YAML spec ([#1835](https://github.com/databricks/cli/pull/1835)).
* Remove Terraform conversion function that's no longer used ([#1840](https://github.com/databricks/cli/pull/1840)).
* Encode assumptions about the dashboards API in a test ([#1839](https://github.com/databricks/cli/pull/1839)).
* Add script to make testing of code on branches easier ([#1844](https://github.com/databricks/cli/pull/1844)).
API Changes:
* Added `databricks disable-legacy-dbfs` command group.
OpenAPI commit cf9c61453990df0f9453670f2fe68e1b128647a2 (2024-10-14)
Dependency updates:
* Upgrade TF provider to 1.54.0 ([#1852](https://github.com/databricks/cli/pull/1852)).
* Bump github.com/databricks/databricks-sdk-go from 0.48.0 to 0.49.0 ([#1843](https://github.com/databricks/cli/pull/1843)).
## [Release] Release v0.230.0
Notable changes for Databricks Asset Bundles:

View File

@@ -35,3 +35,6 @@ docker run -e DATABRICKS_HOST=$YOUR_HOST_URL -e DATABRICKS_TOKEN=$YOUR_TOKEN ghc
This CLI follows the Databricks Unified Authentication principles.
You can find a detailed description at https://github.com/databricks/databricks-sdk-go#authentication.
## Privacy Notice
Databricks CLI use is subject to the [Databricks License](https://github.com/databricks/cli/blob/main/LICENSE) and [Databricks Privacy Notice](https://www.databricks.com/legal/privacynotice), including any Usage Data provisions.

View File

@@ -238,6 +238,15 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos
}
}
// Dashboards: Prefix
for key, dashboard := range r.Dashboards {
if dashboard == nil || dashboard.CreateDashboardRequest == nil {
diags = diags.Extend(diag.Errorf("dashboard %s is not defined", key))
continue
}
dashboard.DisplayName = prefix + dashboard.DisplayName
}
return diags
}

View File

@@ -0,0 +1,70 @@
package mutator
import (
"context"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/dyn"
)
type configureDashboardDefaults struct{}
func ConfigureDashboardDefaults() bundle.Mutator {
return &configureDashboardDefaults{}
}
func (m *configureDashboardDefaults) Name() string {
return "ConfigureDashboardDefaults"
}
func (m *configureDashboardDefaults) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
var diags diag.Diagnostics
pattern := dyn.NewPattern(
dyn.Key("resources"),
dyn.Key("dashboards"),
dyn.AnyKey(),
)
// Configure defaults for all dashboards.
err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
return dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
var err error
v, err = setIfNotExists(v, dyn.NewPath(dyn.Key("parent_path")), dyn.V(b.Config.Workspace.ResourcePath))
if err != nil {
return dyn.InvalidValue, err
}
v, err = setIfNotExists(v, dyn.NewPath(dyn.Key("embed_credentials")), dyn.V(false))
if err != nil {
return dyn.InvalidValue, err
}
return v, nil
})
})
diags = diags.Extend(diag.FromErr(err))
return diags
}
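// setIfNotExists sets a default value at the given path only if no value is
// present there yet. Values that cannot be traversed (e.g. a nil parent) are
// returned unchanged.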
func setIfNotExists(v dyn.Value, path dyn.Path, defaultValue dyn.Value) (dyn.Value, error) {
// Get the field at the specified path (if set).
_, err := dyn.GetByPath(v, path)
switch {
case dyn.IsNoSuchKeyError(err):
// OK, we'll set the default value.
break
case dyn.IsCannotTraverseNilError(err):
// Cannot traverse the value, skip it.
return v, nil
case err == nil:
// The field is set, skip it.
return v, nil
default:
// Return the error.
return v, err
}
// Set the field at the specified path.
return dyn.SetByPath(v, path, defaultValue)
}

View File

@@ -0,0 +1,130 @@
package mutator_test
import (
"context"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/mutator"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/bundle/internal/bundletest"
"github.com/databricks/cli/libs/dyn"
"github.com/databricks/databricks-sdk-go/service/dashboards"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestConfigureDashboardDefaultsParentPath(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Workspace: config.Workspace{
ResourcePath: "/foo/bar",
},
Resources: config.Resources{
Dashboards: map[string]*resources.Dashboard{
"d1": {
// Empty string is skipped.
// See below for how it is set.
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
ParentPath: "",
},
},
"d2": {
// Non-empty string is skipped.
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
ParentPath: "already-set",
},
},
"d3": {
// No parent path set.
},
"d4": nil,
},
},
},
}
// We can't set an empty string in the typed configuration.
// Do it on the dyn.Value directly.
bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
return dyn.Set(v, "resources.dashboards.d1.parent_path", dyn.V(""))
})
diags := bundle.Apply(context.Background(), b, mutator.ConfigureDashboardDefaults())
require.NoError(t, diags.Error())
var v dyn.Value
var err error
// Set to empty string; unchanged.
v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d1.parent_path")
if assert.NoError(t, err) {
assert.Equal(t, "", v.MustString())
}
// Set to "already-set"; unchanged.
v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d2.parent_path")
if assert.NoError(t, err) {
assert.Equal(t, "already-set", v.MustString())
}
// Not set; now set to the workspace resource path.
v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d3.parent_path")
if assert.NoError(t, err) {
assert.Equal(t, "/foo/bar", v.MustString())
}
// No valid dashboard; no change.
_, err = dyn.Get(b.Config.Value(), "resources.dashboards.d4.parent_path")
assert.True(t, dyn.IsCannotTraverseNilError(err))
}
func TestConfigureDashboardDefaultsEmbedCredentials(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Dashboards: map[string]*resources.Dashboard{
"d1": {
EmbedCredentials: true,
},
"d2": {
EmbedCredentials: false,
},
"d3": {
// No embed_credentials set.
},
"d4": nil,
},
},
},
}
diags := bundle.Apply(context.Background(), b, mutator.ConfigureDashboardDefaults())
require.NoError(t, diags.Error())
var v dyn.Value
var err error
// Set to true; still true.
v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d1.embed_credentials")
if assert.NoError(t, err) {
assert.Equal(t, true, v.MustBool())
}
// Set to false; still false.
v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d2.embed_credentials")
if assert.NoError(t, err) {
assert.Equal(t, false, v.MustBool())
}
// Not set; now false.
v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d3.embed_credentials")
if assert.NoError(t, err) {
assert.Equal(t, false, v.MustBool())
}
// No valid dashboard; no change.
_, err = dyn.Get(b.Config.Value(), "resources.dashboards.d4.embed_credentials")
assert.True(t, dyn.IsCannotTraverseNilError(err))
}

View File

@@ -0,0 +1,65 @@
package mutator
import (
"context"
"net/url"
"strconv"
"strings"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/diag"
)
type initializeURLs struct {
}
// InitializeURLs makes sure the URL field of each resource is configured.
// NOTE: since this depends on an extra API call, this mutator adds some extra
// latency. As such, it should only be used when needed.
// This URL field is used for the output of the 'bundle summary' CLI command.
func InitializeURLs() bundle.Mutator {
return &initializeURLs{}
}
func (m *initializeURLs) Name() string {
return "InitializeURLs"
}
func (m *initializeURLs) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
workspaceId, err := b.WorkspaceClient().CurrentWorkspaceID(ctx)
if err != nil {
return diag.FromErr(err)
}
orgId := strconv.FormatInt(workspaceId, 10)
host := b.WorkspaceClient().Config.CanonicalHostName()
err = initializeForWorkspace(b, orgId, host)
if err != nil {
return diag.FromErr(err)
}
return nil
}
func initializeForWorkspace(b *bundle.Bundle, orgId string, host string) error {
baseURL, err := url.Parse(host)
if err != nil {
return err
}
// Add ?o=<workspace id> only if <workspace id> wasn't in the subdomain already.
// The ?o= is needed when vanity URLs / legacy workspace URLs are used.
// If it's not needed we prefer to leave it out since these URLs are rather
// long for most terminals.
//
// See https://docs.databricks.com/en/workspace/workspace-details.html for
// further reading about the '?o=' suffix.
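//
// For example (mirroring the tests below): a vanity host such as
// https://mycompany.databricks.com yields
// https://mycompany.databricks.com/jobs/1?o=123456 for a job with ID 1,
// while a regional host such as https://adb-123456.azuredatabricks.net
// already carries the workspace ID in its subdomain and yields
// https://adb-123456.azuredatabricks.net/jobs/1.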
if !strings.Contains(baseURL.Hostname(), orgId) {
values := baseURL.Query()
values.Add("o", orgId)
baseURL.RawQuery = values.Encode()
}
for _, group := range b.Config.Resources.AllResources() {
for _, r := range group.Resources {
r.InitializeURL(*baseURL)
}
}
return nil
}

View File

@@ -0,0 +1,140 @@
package mutator
import (
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/databricks-sdk-go/service/catalog"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/databricks/databricks-sdk-go/service/dashboards"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/databricks/databricks-sdk-go/service/ml"
"github.com/databricks/databricks-sdk-go/service/pipelines"
"github.com/databricks/databricks-sdk-go/service/serving"
"github.com/stretchr/testify/require"
)
func TestInitializeURLs(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Workspace: config.Workspace{
Host: "https://mycompany.databricks.com/",
},
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job1": {
ID: "1",
JobSettings: &jobs.JobSettings{Name: "job1"},
},
},
Pipelines: map[string]*resources.Pipeline{
"pipeline1": {
ID: "3",
PipelineSpec: &pipelines.PipelineSpec{Name: "pipeline1"},
},
},
Experiments: map[string]*resources.MlflowExperiment{
"experiment1": {
ID: "4",
Experiment: &ml.Experiment{Name: "experiment1"},
},
},
Models: map[string]*resources.MlflowModel{
"model1": {
ID: "a model uses its name for identifier",
Model: &ml.Model{Name: "a model uses its name for identifier"},
},
},
ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{
"servingendpoint1": {
ID: "my_serving_endpoint",
CreateServingEndpoint: &serving.CreateServingEndpoint{
Name: "my_serving_endpoint",
},
},
},
RegisteredModels: map[string]*resources.RegisteredModel{
"registeredmodel1": {
ID: "8",
CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{
Name: "my_registered_model",
},
},
},
QualityMonitors: map[string]*resources.QualityMonitor{
"qualityMonitor1": {
CreateMonitor: &catalog.CreateMonitor{
TableName: "catalog.schema.qualityMonitor1",
},
},
},
Schemas: map[string]*resources.Schema{
"schema1": {
ID: "catalog.schema",
CreateSchema: &catalog.CreateSchema{
Name: "schema",
},
},
},
Clusters: map[string]*resources.Cluster{
"cluster1": {
ID: "1017-103929-vlr7jzcf",
ClusterSpec: &compute.ClusterSpec{
ClusterName: "cluster1",
},
},
},
Dashboards: map[string]*resources.Dashboard{
"dashboard1": {
ID: "01ef8d56871e1d50ae30ce7375e42478",
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
DisplayName: "My special dashboard",
},
},
},
},
},
}
expectedURLs := map[string]string{
"job1": "https://mycompany.databricks.com/jobs/1?o=123456",
"pipeline1": "https://mycompany.databricks.com/pipelines/3?o=123456",
"experiment1": "https://mycompany.databricks.com/ml/experiments/4?o=123456",
"model1": "https://mycompany.databricks.com/ml/models/a%20model%20uses%20its%20name%20for%20identifier?o=123456",
"servingendpoint1": "https://mycompany.databricks.com/ml/endpoints/my_serving_endpoint?o=123456",
"registeredmodel1": "https://mycompany.databricks.com/explore/data/models/8?o=123456",
"qualityMonitor1": "https://mycompany.databricks.com/explore/data/catalog/schema/qualityMonitor1?o=123456",
"schema1": "https://mycompany.databricks.com/explore/data/catalog/schema?o=123456",
"cluster1": "https://mycompany.databricks.com/compute/clusters/1017-103929-vlr7jzcf?o=123456",
"dashboard1": "https://mycompany.databricks.com/dashboardsv3/01ef8d56871e1d50ae30ce7375e42478/published?o=123456",
}
initializeForWorkspace(b, "123456", "https://mycompany.databricks.com/")
for _, group := range b.Config.Resources.AllResources() {
for key, r := range group.Resources {
require.Equal(t, expectedURLs[key], r.GetURL(), "Unexpected URL for "+key)
}
}
}
func TestInitializeURLsWithoutOrgId(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job1": {
ID: "1",
JobSettings: &jobs.JobSettings{Name: "job1"},
},
},
},
},
}
initializeForWorkspace(b, "123456", "https://adb-123456.azuredatabricks.net/")
require.Equal(t, "https://adb-123456.azuredatabricks.net/jobs/1", b.Config.Resources.Jobs["job1"].URL)
}

View File

@@ -14,6 +14,7 @@ import (
sdkconfig "github.com/databricks/databricks-sdk-go/config"
"github.com/databricks/databricks-sdk-go/service/catalog"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/databricks/databricks-sdk-go/service/dashboards"
"github.com/databricks/databricks-sdk-go/service/iam"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/databricks/databricks-sdk-go/service/ml"
@@ -126,6 +127,13 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
Clusters: map[string]*resources.Cluster{
"cluster1": {ClusterSpec: &compute.ClusterSpec{ClusterName: "cluster1", SparkVersion: "13.2.x", NumWorkers: 1}},
},
Dashboards: map[string]*resources.Dashboard{
"dashboard1": {
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
DisplayName: "dashboard1",
},
},
},
},
},
// Use AWS implementation for testing.
@@ -187,6 +195,9 @@ func TestProcessTargetModeDevelopment(t *testing.T) {
// Clusters
assert.Equal(t, "[dev lennart] cluster1", b.Config.Resources.Clusters["cluster1"].ClusterName)
// Dashboards
assert.Equal(t, "[dev lennart] dashboard1", b.Config.Resources.Dashboards["dashboard1"].DisplayName)
}
func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) {

View File

@@ -110,6 +110,16 @@ func validateRunAs(b *bundle.Bundle) diag.Diagnostics {
))
}
// Dashboards do not support run_as in the API.
if len(b.Config.Resources.Dashboards) > 0 {
diags = diags.Extend(reportRunAsNotSupported(
"dashboards",
b.Config.GetLocation("resources.dashboards"),
b.Config.Workspace.CurrentUser.UserName,
identity,
))
}
return diags
}

View File

@@ -33,6 +33,7 @@ func allResourceTypes(t *testing.T) []string {
// also update this check when adding a new resource
require.Equal(t, []string{
"clusters",
"dashboards",
"experiments",
"jobs",
"model_serving_endpoints",
@@ -190,6 +191,7 @@ func TestRunAsErrorForUnsupportedResources(t *testing.T) {
Config: *r,
}
diags := bundle.Apply(context.Background(), b, SetRunAs())
require.Error(t, diags.Error())
assert.Contains(t, diags.Error().Error(), "do not support a setting a run_as user that is different from the owner.\n"+
"Current identity: alice. Run as identity: bob.\n"+
"See https://docs.databricks.com/dev-tools/bundles/run-as.html to learn more about the run_as property.", rt)

View File

@@ -162,6 +162,20 @@ func (t *translateContext) translateNoOp(literal, localFullPath, localRelPath, r
return localRelPath, nil
}
func (t *translateContext) retainLocalAbsoluteFilePath(literal, localFullPath, localRelPath, remotePath string) (string, error) {
info, err := t.b.SyncRoot.Stat(localRelPath)
if errors.Is(err, fs.ErrNotExist) {
return "", fmt.Errorf("file %s not found", literal)
}
if err != nil {
return "", fmt.Errorf("unable to determine if %s is a file: %w", localFullPath, err)
}
if info.IsDir() {
return "", fmt.Errorf("expected %s to be a file but found a directory", literal)
}
return localFullPath, nil
}
func (t *translateContext) translateNoOpWithPrefix(literal, localFullPath, localRelPath, remotePath string) (string, error) {
if !strings.HasPrefix(localRelPath, ".") {
localRelPath = "." + string(filepath.Separator) + localRelPath
@@ -215,6 +229,7 @@ func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnos
t.applyJobTranslations,
t.applyPipelineTranslations,
t.applyArtifactTranslations,
t.applyDashboardTranslations,
} {
v, err = fn(v)
if err != nil {

View File

@@ -0,0 +1,28 @@
package mutator
import (
"fmt"
"github.com/databricks/cli/libs/dyn"
)
func (t *translateContext) applyDashboardTranslations(v dyn.Value) (dyn.Value, error) {
// Convert the `file_path` field to a local absolute path.
// We load the file at this path and use its contents for the dashboard contents.
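// For example, a dashboard declared with `file_path: ./dashboards/sales.lvdash.json`
// (an illustrative path) is resolved relative to the directory of the YAML
// file in which the value was defined.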
pattern := dyn.NewPattern(
dyn.Key("resources"),
dyn.Key("dashboards"),
dyn.AnyKey(),
dyn.Key("file_path"),
)
return dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
key := p[2].Key()
dir, err := v.Location().Directory()
if err != nil {
return dyn.InvalidValue, fmt.Errorf("unable to determine directory for dashboard %s: %w", key, err)
}
return t.rewriteRelativeTo(p, v, t.retainLocalAbsoluteFilePath, dir, "")
})
}

View File

@@ -699,6 +699,9 @@ func TestTranslatePathJobEnvironments(t *testing.T) {
"../dist/env2.whl",
"simplejson",
"/Workspace/Users/foo@bar.com/test.whl",
"--extra-index-url https://name:token@gitlab.com/api/v4/projects/9876/packages/pypi/simple foobar",
"foobar --extra-index-url https://name:token@gitlab.com/api/v4/projects/9876/packages/pypi/simple",
"https://foo@bar.com/packages/pypi/simple",
},
},
},
@@ -719,6 +722,9 @@
assert.Equal(t, strings.Join([]string{".", "dist", "env2.whl"}, string(os.PathSeparator)), b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1])
assert.Equal(t, "simplejson", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[2])
assert.Equal(t, "/Workspace/Users/foo@bar.com/test.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[3])
assert.Equal(t, "--extra-index-url https://name:token@gitlab.com/api/v4/projects/9876/packages/pypi/simple foobar", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[4])
assert.Equal(t, "foobar --extra-index-url https://name:token@gitlab.com/api/v4/projects/9876/packages/pypi/simple", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[5])
assert.Equal(t, "https://foo@bar.com/packages/pypi/simple", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[6])
}
func TestTranslatePathWithComplexVariables(t *testing.T) {

View File

@@ -3,6 +3,7 @@ package config
import (
"context"
"fmt"
"net/url"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/databricks-sdk-go"
@@ -21,6 +22,7 @@ type Resources struct {
Schemas map[string]*resources.Schema `json:"schemas,omitempty"`
Volumes map[string]*resources.Volume `json:"volumes,omitempty"`
Clusters map[string]*resources.Cluster `json:"clusters,omitempty"`
Dashboards map[string]*resources.Dashboard `json:"dashboards,omitempty"`
}
type ConfigResource interface {
@@ -31,6 +33,54 @@ type ConfigResource interface {
// Terraform equivalent name of the resource. For example "databricks_job"
// for jobs and "databricks_pipeline" for pipelines.
TerraformResourceName() string
// GetName returns the in-product name of the resource.
GetName() string
// GetURL returns the URL of the resource.
GetURL() string
// InitializeURL initializes the URL field of the resource.
InitializeURL(baseURL url.URL)
}
// ResourceGroup represents a group of resources of the same type.
// It includes a description of the resource type and a map of resources.
type ResourceGroup struct {
Description ResourceDescription
Resources map[string]ConfigResource
}
// collectResourceMap collects resources of a specific type into a ResourceGroup.
func collectResourceMap[T ConfigResource](
description ResourceDescription,
input map[string]T,
) ResourceGroup {
resources := make(map[string]ConfigResource)
for key, resource := range input {
resources[key] = resource
}
return ResourceGroup{
Description: description,
Resources: resources,
}
}
// AllResources returns all resources in the bundle grouped by their resource type.
func (r *Resources) AllResources() []ResourceGroup {
descriptions := SupportedResources()
return []ResourceGroup{
collectResourceMap(descriptions["jobs"], r.Jobs),
collectResourceMap(descriptions["pipelines"], r.Pipelines),
collectResourceMap(descriptions["models"], r.Models),
collectResourceMap(descriptions["experiments"], r.Experiments),
collectResourceMap(descriptions["model_serving_endpoints"], r.ModelServingEndpoints),
collectResourceMap(descriptions["registered_models"], r.RegisteredModels),
collectResourceMap(descriptions["quality_monitors"], r.QualityMonitors),
collectResourceMap(descriptions["schemas"], r.Schemas),
collectResourceMap(descriptions["clusters"], r.Clusters),
collectResourceMap(descriptions["dashboards"], r.Dashboards),
}
}
func (r *Resources) FindResourceByConfigKey(key string) (ConfigResource, error) {
@@ -62,21 +112,77 @@ func (r *Resources) FindResourceByConfigKey(key string) (ConfigResource, error)
}
type ResourceDescription struct {
// Singular and plural name when used to refer to the configuration.
SingularName string
PluralName string
// Singular and plural title when used in summaries / terminal UI.
SingularTitle string
PluralTitle string
}
// The keys of the map corresponds to the resource key in the bundle configuration.
func SupportedResources() map[string]ResourceDescription {
return map[string]ResourceDescription{
"jobs": {SingularName: "job"},
"pipelines": {SingularName: "pipeline"},
"models": {SingularName: "model"},
"experiments": {SingularName: "experiment"},
"model_serving_endpoints": {SingularName: "model_serving_endpoint"},
"registered_models": {SingularName: "registered_model"},
"quality_monitors": {SingularName: "quality_monitor"},
"schemas": {SingularName: "schema"},
"volumes": {SingularName: "volume"},
"clusters": {SingularName: "cluster"},
"jobs": {
SingularName: "job",
PluralName: "jobs",
SingularTitle: "Job",
PluralTitle: "Jobs",
},
"pipelines": {
SingularName: "pipeline",
PluralName: "pipelines",
SingularTitle: "Pipeline",
PluralTitle: "Pipelines",
},
"models": {
SingularName: "model",
PluralName: "models",
SingularTitle: "Model",
PluralTitle: "Models",
},
"experiments": {
SingularName: "experiment",
PluralName: "experiments",
SingularTitle: "Experiment",
PluralTitle: "Experiments",
},
"model_serving_endpoints": {
SingularName: "model_serving_endpoint",
PluralName: "model_serving_endpoints",
SingularTitle: "Model Serving Endpoint",
PluralTitle: "Model Serving Endpoints",
},
"registered_models": {
SingularName: "registered_model",
PluralName: "registered_models",
SingularTitle: "Registered Model",
PluralTitle: "Registered Models",
},
"quality_monitors": {
SingularName: "quality_monitor",
PluralName: "quality_monitors",
SingularTitle: "Quality Monitor",
PluralTitle: "Quality Monitors",
},
"schemas": {
SingularName: "schema",
PluralName: "schemas",
SingularTitle: "Schema",
PluralTitle: "Schemas",
},
"clusters": {
SingularName: "cluster",
PluralName: "clusters",
SingularTitle: "Cluster",
PluralTitle: "Clusters",
},
"dashboards": {
SingularName: "dashboard",
PluralName: "dashboards",
SingularTitle: "Dashboard",
PluralTitle: "Dashboards",
},
}
}

View File

@@ -2,6 +2,8 @@ package resources
import (
"context"
"fmt"
"net/url"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go"
@@ -13,6 +15,7 @@ type Cluster struct {
ID string `json:"id,omitempty" bundle:"readonly"`
Permissions []Permission `json:"permissions,omitempty"`
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`
*compute.ClusterSpec
}
@@ -37,3 +40,19 @@ func (s *Cluster) Exists(ctx context.Context, w *databricks.WorkspaceClient, id
func (s *Cluster) TerraformResourceName() string {
return "databricks_cluster"
}
func (s *Cluster) InitializeURL(baseURL url.URL) {
if s.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("compute/clusters/%s", s.ID)
s.URL = baseURL.String()
}
func (s *Cluster) GetName() string {
return s.ClusterName
}
func (s *Cluster) GetURL() string {
return s.URL
}

View File

@@ -0,0 +1,81 @@
package resources
import (
"context"
"fmt"
"net/url"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go"
"github.com/databricks/databricks-sdk-go/marshal"
"github.com/databricks/databricks-sdk-go/service/dashboards"
)
type Dashboard struct {
ID string `json:"id,omitempty" bundle:"readonly"`
Permissions []Permission `json:"permissions,omitempty"`
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`
*dashboards.CreateDashboardRequest
// =========================
// === Additional fields ===
// =========================
// SerializedDashboard holds the contents of the dashboard in serialized JSON form.
// We override the field's type from the SDK struct here to allow for inlining as YAML.
// If the value is a string, it is used as is.
// If it is not a string, its contents are marshalled as JSON.
SerializedDashboard any `json:"serialized_dashboard,omitempty"`
// EmbedCredentials is a flag to indicate if the publisher's credentials should
// be embedded in the published dashboard. These embedded credentials will be used
// to execute the published dashboard's queries.
//
// Defaults to false if not set.
EmbedCredentials bool `json:"embed_credentials,omitempty"`
// FilePath points to the local `.lvdash.json` file containing the dashboard definition.
FilePath string `json:"file_path,omitempty"`
}
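// An illustrative bundle configuration for a dashboard (the keys follow the
// JSON tags above; the names and paths are made up):
//
// resources:
//   dashboards:
//     my_dashboard:
//       display_name: "My Dashboard"
//       file_path: ./my_dashboard.lvdash.json
//       embed_credentials: false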
func (r *Dashboard) UnmarshalJSON(b []byte) error {
return marshal.Unmarshal(b, r)
}
func (r Dashboard) MarshalJSON() ([]byte, error) {
return marshal.Marshal(r)
}
func (*Dashboard) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) {
_, err := w.Lakeview.Get(ctx, dashboards.GetDashboardRequest{
DashboardId: id,
})
if err != nil {
log.Debugf(ctx, "dashboard %s does not exist", id)
return false, err
}
return true, nil
}
func (*Dashboard) TerraformResourceName() string {
return "databricks_dashboard"
}
func (r *Dashboard) InitializeURL(baseURL url.URL) {
if r.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("dashboardsv3/%s/published", r.ID)
r.URL = baseURL.String()
}
func (r *Dashboard) GetName() string {
return r.DisplayName
}
func (r *Dashboard) GetURL() string {
return r.URL
}

View File

@@ -2,6 +2,8 @@ package resources
import (
"context"
"fmt"
"net/url"
"strconv"
"github.com/databricks/cli/libs/log"
@@ -14,6 +16,7 @@ type Job struct {
ID string `json:"id,omitempty" bundle:"readonly"`
Permissions []Permission `json:"permissions,omitempty"`
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`
*jobs.JobSettings
}
@@ -44,3 +47,19 @@ func (j *Job) Exists(ctx context.Context, w *databricks.WorkspaceClient, id stri
func (j *Job) TerraformResourceName() string {
return "databricks_job"
}
func (j *Job) InitializeURL(baseURL url.URL) {
if j.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("jobs/%s", j.ID)
j.URL = baseURL.String()
}
func (j *Job) GetName() string {
return j.Name
}
func (j *Job) GetURL() string {
return j.URL
}

View File

@@ -2,6 +2,8 @@ package resources
import (
"context"
"fmt"
"net/url"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go"
@@ -13,6 +15,7 @@ type MlflowExperiment struct {
ID string `json:"id,omitempty" bundle:"readonly"`
Permissions []Permission `json:"permissions,omitempty"`
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`
*ml.Experiment
}
@@ -39,3 +42,19 @@ func (s *MlflowExperiment) Exists(ctx context.Context, w *databricks.WorkspaceCl
func (s *MlflowExperiment) TerraformResourceName() string {
return "databricks_mlflow_experiment"
}
func (s *MlflowExperiment) InitializeURL(baseURL url.URL) {
if s.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("ml/experiments/%s", s.ID)
s.URL = baseURL.String()
}
func (s *MlflowExperiment) GetName() string {
return s.Name
}
func (s *MlflowExperiment) GetURL() string {
return s.URL
}

View File

@ -2,6 +2,8 @@ package resources
import (
"context"
"fmt"
"net/url"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go"
@@ -13,6 +15,7 @@ type MlflowModel struct {
ID string `json:"id,omitempty" bundle:"readonly"`
Permissions []Permission `json:"permissions,omitempty"`
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`
*ml.Model
}
@@ -39,3 +42,19 @@ func (s *MlflowModel) Exists(ctx context.Context, w *databricks.WorkspaceClient,
func (s *MlflowModel) TerraformResourceName() string {
return "databricks_mlflow_model"
}
func (s *MlflowModel) InitializeURL(baseURL url.URL) {
if s.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("ml/models/%s", s.ID)
s.URL = baseURL.String()
}
func (s *MlflowModel) GetName() string {
return s.Name
}
func (s *MlflowModel) GetURL() string {
return s.URL
}

View File

@@ -2,6 +2,8 @@ package resources
import (
"context"
"fmt"
"net/url"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go"
@@ -23,6 +25,7 @@ type ModelServingEndpoint struct {
Permissions []Permission `json:"permissions,omitempty"`
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`
}
func (s *ModelServingEndpoint) UnmarshalJSON(b []byte) error {
@@ -47,3 +50,19 @@ func (s *ModelServingEndpoint) Exists(ctx context.Context, w *databricks.Workspa
func (s *ModelServingEndpoint) TerraformResourceName() string {
return "databricks_model_serving"
}
func (s *ModelServingEndpoint) InitializeURL(baseURL url.URL) {
if s.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("ml/endpoints/%s", s.ID)
s.URL = baseURL.String()
}
func (s *ModelServingEndpoint) GetName() string {
return s.Name
}
func (s *ModelServingEndpoint) GetURL() string {
return s.URL
}

View File

@@ -1,5 +1,7 @@
package resources
import "fmt"
// Permission holds the permission level setting for a single principal.
// Multiple of these can be defined on any resource.
type Permission struct {
@@ -9,3 +11,19 @@ type Permission struct {
ServicePrincipalName string `json:"service_principal_name,omitempty"`
GroupName string `json:"group_name,omitempty"`
}
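// String returns a human-readable representation of the permission, for
// example "level: CAN_MANAGE, user_name: foo@bar.com". It is used when
// permissions are listed in diagnostics output.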
func (p Permission) String() string {
if p.UserName != "" {
return fmt.Sprintf("level: %s, user_name: %s", p.Level, p.UserName)
}
if p.ServicePrincipalName != "" {
return fmt.Sprintf("level: %s, service_principal_name: %s", p.Level, p.ServicePrincipalName)
}
if p.GroupName != "" {
return fmt.Sprintf("level: %s, group_name: %s", p.Level, p.GroupName)
}
return fmt.Sprintf("level: %s", p.Level)
}

View File

@@ -2,6 +2,8 @@ package resources
import (
"context"
"fmt"
"net/url"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go"
@@ -13,6 +15,7 @@ type Pipeline struct {
ID string `json:"id,omitempty" bundle:"readonly"`
Permissions []Permission `json:"permissions,omitempty"`
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`
*pipelines.PipelineSpec
}
@@ -39,3 +42,19 @@ func (p *Pipeline) Exists(ctx context.Context, w *databricks.WorkspaceClient, id
func (p *Pipeline) TerraformResourceName() string {
return "databricks_pipeline"
}
func (p *Pipeline) InitializeURL(baseURL url.URL) {
if p.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("pipelines/%s", p.ID)
p.URL = baseURL.String()
}
func (p *Pipeline) GetName() string {
return p.Name
}
func (p *Pipeline) GetURL() string {
return p.URL
}

View File

@@ -2,6 +2,9 @@ package resources
import (
"context"
"fmt"
"net/url"
"strings"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go"
@@ -20,6 +23,7 @@ type QualityMonitor struct {
ID string `json:"id,omitempty" bundle:"readonly"`
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`
}
func (s *QualityMonitor) UnmarshalJSON(b []byte) error {
@@ -44,3 +48,19 @@ func (s *QualityMonitor) Exists(ctx context.Context, w *databricks.WorkspaceClie
func (s *QualityMonitor) TerraformResourceName() string {
return "databricks_quality_monitor"
}
func (s *QualityMonitor) InitializeURL(baseURL url.URL) {
if s.TableName == "" {
return
}
baseURL.Path = fmt.Sprintf("explore/data/%s", strings.ReplaceAll(s.TableName, ".", "/"))
s.URL = baseURL.String()
}
func (s *QualityMonitor) GetName() string {
return s.TableName
}
func (s *QualityMonitor) GetURL() string {
return s.URL
}

View File

@@ -2,6 +2,9 @@ package resources
import (
"context"
"fmt"
"net/url"
"strings"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go"
@@ -24,6 +27,7 @@ type RegisteredModel struct {
*catalog.CreateRegisteredModelRequest
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`
}
func (s *RegisteredModel) UnmarshalJSON(b []byte) error {
@@ -48,3 +52,19 @@ func (s *RegisteredModel) Exists(ctx context.Context, w *databricks.WorkspaceCli
func (s *RegisteredModel) TerraformResourceName() string {
return "databricks_registered_model"
}
func (s *RegisteredModel) InitializeURL(baseURL url.URL) {
if s.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("explore/data/models/%s", strings.ReplaceAll(s.ID, ".", "/"))
s.URL = baseURL.String()
}
func (s *RegisteredModel) GetName() string {
return s.Name
}
func (s *RegisteredModel) GetURL() string {
return s.URL
}

View File

@@ -1,6 +1,12 @@
package resources
import (
"context"
"fmt"
"net/url"
"strings"
"github.com/databricks/databricks-sdk-go"
"github.com/databricks/databricks-sdk-go/marshal"
"github.com/databricks/databricks-sdk-go/service/catalog"
)
@@ -16,6 +22,31 @@ type Schema struct {
*catalog.CreateSchema
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`
}
func (s *Schema) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) {
return false, fmt.Errorf("schema.Exists() is not supported")
}
func (s *Schema) TerraformResourceName() string {
return "databricks_schema"
}
func (s *Schema) InitializeURL(baseURL url.URL) {
if s.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("explore/data/%s", strings.ReplaceAll(s.ID, ".", "/"))
s.URL = baseURL.String()
}
func (s *Schema) GetURL() string {
return s.URL
}
func (s *Schema) GetName() string {
return s.Name
}
func (s *Schema) UnmarshalJSON(b []byte) error {

View File

@@ -63,17 +63,37 @@ func TestCustomMarshallerIsImplemented(t *testing.T) {
}
}
func TestResourcesAllResourcesCompleteness(t *testing.T) {
r := Resources{}
rt := reflect.TypeOf(r)
// Collect the set of included resource types.
var types []string
for _, group := range r.AllResources() {
types = append(types, group.Description.PluralName)
}
for i := 0; i < rt.NumField(); i++ {
field := rt.Field(i)
jsonTag := field.Tag.Get("json")
if idx := strings.Index(jsonTag, ","); idx != -1 {
jsonTag = jsonTag[:idx]
}
assert.Contains(t, types, jsonTag, "Field %s is missing in AllResources", field.Name)
}
}
func TestSupportedResources(t *testing.T) {
// Please add your resource to the SupportedResources() function in resources.go if you add a new resource.
actual := SupportedResources()
typ := reflect.TypeOf(Resources{})
for i := 0; i < typ.NumField(); i++ {
field := typ.Field(i)
jsonTags := strings.Split(field.Tag.Get("json"), ",")
pluralName := jsonTags[0]
assert.Equal(t, actual[pluralName].PluralName, pluralName)
}
}

View File

@@ -0,0 +1,126 @@
package validate
import (
"context"
"fmt"
"path"
"strings"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/libraries"
"github.com/databricks/cli/bundle/permissions"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/databricks-sdk-go/apierr"
"github.com/databricks/databricks-sdk-go/service/workspace"
"golang.org/x/sync/errgroup"
)
type folderPermissions struct {
}
// Apply implements bundle.ReadOnlyMutator.
func (f *folderPermissions) Apply(ctx context.Context, b bundle.ReadOnlyBundle) diag.Diagnostics {
if len(b.Config().Permissions) == 0 {
return nil
}
rootPath := b.Config().Workspace.RootPath
paths := []string{}
if !libraries.IsVolumesPath(rootPath) && !libraries.IsWorkspaceSharedPath(rootPath) {
paths = append(paths, rootPath)
}
if !strings.HasSuffix(rootPath, "/") {
rootPath += "/"
}
for _, p := range []string{
b.Config().Workspace.ArtifactPath,
b.Config().Workspace.FilePath,
b.Config().Workspace.StatePath,
b.Config().Workspace.ResourcePath,
} {
if libraries.IsWorkspaceSharedPath(p) || libraries.IsVolumesPath(p) {
continue
}
if strings.HasPrefix(p, rootPath) {
continue
}
paths = append(paths, p)
}
var diags diag.Diagnostics
g, ctx := errgroup.WithContext(ctx)
results := make([]diag.Diagnostics, len(paths))
for i, p := range paths {
g.Go(func() error {
results[i] = checkFolderPermission(ctx, b, p)
return nil
})
}
if err := g.Wait(); err != nil {
return diag.FromErr(err)
}
for _, r := range results {
diags = diags.Extend(r)
}
return diags
}
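// checkFolderPermission compares the bundle's top-level permissions against
// the ACL of the closest existing ancestor of the given folder path.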
func checkFolderPermission(ctx context.Context, b bundle.ReadOnlyBundle, folderPath string) diag.Diagnostics {
w := b.WorkspaceClient().Workspace
obj, err := getClosestExistingObject(ctx, w, folderPath)
if err != nil {
return diag.FromErr(err)
}
objPermissions, err := w.GetPermissions(ctx, workspace.GetWorkspaceObjectPermissionsRequest{
WorkspaceObjectId: fmt.Sprint(obj.ObjectId),
WorkspaceObjectType: "directories",
})
if err != nil {
return diag.FromErr(err)
}
p := permissions.ObjectAclToResourcePermissions(folderPath, objPermissions.AccessControlList)
return p.Compare(b.Config().Permissions)
}
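// getClosestExistingObject walks up the directory tree until it finds an
// object that exists. For example, for "/a/b/c" it tries "/a/b/c", "/a/b",
// "/a", and finally "/" before giving up.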
func getClosestExistingObject(ctx context.Context, w workspace.WorkspaceInterface, folderPath string) (*workspace.ObjectInfo, error) {
for {
obj, err := w.GetStatusByPath(ctx, folderPath)
if err == nil {
return obj, nil
}
if !apierr.IsMissing(err) {
return nil, err
}
parent := path.Dir(folderPath)
// If the parent is the same as the current folder, then we have reached the root
if folderPath == parent {
break
}
folderPath = parent
}
return nil, fmt.Errorf("folder %s and its parent folders do not exist", folderPath)
}
// Name implements bundle.ReadOnlyMutator.
func (f *folderPermissions) Name() string {
return "validate:folder_permissions"
}
// ValidateFolderPermissions validates that the permissions on the bundle's folders in the
// workspace file system match the permissions in the top-level permissions section of the bundle.
func ValidateFolderPermissions() bundle.ReadOnlyMutator {
return &folderPermissions{}
}

View File

@@ -0,0 +1,208 @@
package validate
import (
"context"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/bundle/permissions"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/databricks-sdk-go/apierr"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/workspace"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestFolderPermissionsInheritedWhenRootPathDoesNotExist(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Workspace: config.Workspace{
RootPath: "/Workspace/Users/foo@bar.com",
ArtifactPath: "/Workspace/Users/otherfoo@bar.com/artifacts",
FilePath: "/Workspace/Users/foo@bar.com/files",
StatePath: "/Workspace/Users/foo@bar.com/state",
ResourcePath: "/Workspace/Users/foo@bar.com/resources",
},
Permissions: []resources.Permission{
{Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"},
},
},
}
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockWorkspaceAPI()
api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/otherfoo@bar.com/artifacts").Return(nil, &apierr.APIError{
StatusCode: 404,
ErrorCode: "RESOURCE_DOES_NOT_EXIST",
})
api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/otherfoo@bar.com").Return(nil, &apierr.APIError{
StatusCode: 404,
ErrorCode: "RESOURCE_DOES_NOT_EXIST",
})
api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/foo@bar.com").Return(nil, &apierr.APIError{
StatusCode: 404,
ErrorCode: "RESOURCE_DOES_NOT_EXIST",
})
api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users").Return(nil, &apierr.APIError{
StatusCode: 404,
ErrorCode: "RESOURCE_DOES_NOT_EXIST",
})
api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace").Return(&workspace.ObjectInfo{
ObjectId: 1234,
}, nil)
api.EXPECT().GetPermissions(mock.Anything, workspace.GetWorkspaceObjectPermissionsRequest{
WorkspaceObjectId: "1234",
WorkspaceObjectType: "directories",
}).Return(&workspace.WorkspaceObjectPermissions{
ObjectId: "1234",
AccessControlList: []workspace.WorkspaceObjectAccessControlResponse{
{
UserName: "foo@bar.com",
AllPermissions: []workspace.WorkspaceObjectPermission{
{PermissionLevel: "CAN_MANAGE"},
},
},
},
}, nil)
b.SetWorkpaceClient(m.WorkspaceClient)
rb := bundle.ReadOnly(b)
diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions())
require.Empty(t, diags)
}
func TestValidateFolderPermissionsFailsOnMissingBundlePermission(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Workspace: config.Workspace{
RootPath: "/Workspace/Users/foo@bar.com",
ArtifactPath: "/Workspace/Users/foo@bar.com/artifacts",
FilePath: "/Workspace/Users/foo@bar.com/files",
StatePath: "/Workspace/Users/foo@bar.com/state",
ResourcePath: "/Workspace/Users/foo@bar.com/resources",
},
Permissions: []resources.Permission{
{Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"},
},
},
}
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockWorkspaceAPI()
api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/foo@bar.com").Return(&workspace.ObjectInfo{
ObjectId: 1234,
}, nil)
api.EXPECT().GetPermissions(mock.Anything, workspace.GetWorkspaceObjectPermissionsRequest{
WorkspaceObjectId: "1234",
WorkspaceObjectType: "directories",
}).Return(&workspace.WorkspaceObjectPermissions{
ObjectId: "1234",
AccessControlList: []workspace.WorkspaceObjectAccessControlResponse{
{
UserName: "foo@bar.com",
AllPermissions: []workspace.WorkspaceObjectPermission{
{PermissionLevel: "CAN_MANAGE"},
},
},
{
UserName: "foo2@bar.com",
AllPermissions: []workspace.WorkspaceObjectPermission{
{PermissionLevel: "CAN_MANAGE"},
},
},
},
}, nil)
b.SetWorkpaceClient(m.WorkspaceClient)
rb := bundle.ReadOnly(b)
diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions())
require.Len(t, diags, 1)
require.Equal(t, "untracked permissions apply to target workspace path", diags[0].Summary)
require.Equal(t, diag.Warning, diags[0].Severity)
require.Equal(t, "The following permissions apply to the workspace folder at \"/Workspace/Users/foo@bar.com\" but are not configured in the bundle:\n- level: CAN_MANAGE, user_name: foo2@bar.com\n", diags[0].Detail)
}
func TestValidateFolderPermissionsFailsOnPermissionMismatch(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Workspace: config.Workspace{
RootPath: "/Workspace/Users/foo@bar.com",
ArtifactPath: "/Workspace/Users/foo@bar.com/artifacts",
FilePath: "/Workspace/Users/foo@bar.com/files",
StatePath: "/Workspace/Users/foo@bar.com/state",
ResourcePath: "/Workspace/Users/foo@bar.com/resources",
},
Permissions: []resources.Permission{
{Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"},
},
},
}
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockWorkspaceAPI()
api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/foo@bar.com").Return(&workspace.ObjectInfo{
ObjectId: 1234,
}, nil)
api.EXPECT().GetPermissions(mock.Anything, workspace.GetWorkspaceObjectPermissionsRequest{
WorkspaceObjectId: "1234",
WorkspaceObjectType: "directories",
}).Return(&workspace.WorkspaceObjectPermissions{
ObjectId: "1234",
AccessControlList: []workspace.WorkspaceObjectAccessControlResponse{
{
UserName: "foo2@bar.com",
AllPermissions: []workspace.WorkspaceObjectPermission{
{PermissionLevel: "CAN_MANAGE"},
},
},
},
}, nil)
b.SetWorkpaceClient(m.WorkspaceClient)
rb := bundle.ReadOnly(b)
diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions())
require.Len(t, diags, 1)
require.Equal(t, "untracked permissions apply to target workspace path", diags[0].Summary)
require.Equal(t, diag.Warning, diags[0].Severity)
}
func TestValidateFolderPermissionsFailsOnNoRootFolder(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Workspace: config.Workspace{
RootPath: "/NotExisting",
ArtifactPath: "/NotExisting/artifacts",
FilePath: "/NotExisting/files",
StatePath: "/NotExisting/state",
ResourcePath: "/NotExisting/resources",
},
Permissions: []resources.Permission{
{Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"},
},
},
}
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockWorkspaceAPI()
api.EXPECT().GetStatusByPath(mock.Anything, "/NotExisting").Return(nil, &apierr.APIError{
StatusCode: 404,
ErrorCode: "RESOURCE_DOES_NOT_EXIST",
})
api.EXPECT().GetStatusByPath(mock.Anything, "/").Return(nil, &apierr.APIError{
StatusCode: 404,
ErrorCode: "RESOURCE_DOES_NOT_EXIST",
})
b.SetWorkpaceClient(m.WorkspaceClient)
rb := bundle.ReadOnly(b)
diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions())
require.Len(t, diags, 1)
require.Equal(t, "folder / and its parent folders do not exist", diags[0].Summary)
require.Equal(t, diag.Error, diags[0].Severity)
}

View File

@@ -35,6 +35,7 @@ func (v *validate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
FilesToSync(),
ValidateSyncPatterns(),
JobTaskClusterSpec(),
ValidateFolderPermissions(),
))
}

View File

@@ -0,0 +1,117 @@
package terraform
import (
"context"
"fmt"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/dyn"
tfjson "github.com/hashicorp/terraform-json"
)
type dashboardState struct {
Name string
ID string
ETag string
}
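// collectDashboardsFromState returns the name, ID, and ETag of every managed
// databricks_dashboard resource in the Terraform state.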
func collectDashboardsFromState(ctx context.Context, b *bundle.Bundle) ([]dashboardState, error) {
state, err := ParseResourcesState(ctx, b)
if err != nil && state == nil {
return nil, err
}
var dashboards []dashboardState
for _, resource := range state.Resources {
if resource.Mode != tfjson.ManagedResourceMode {
continue
}
for _, instance := range resource.Instances {
switch resource.Type {
case "databricks_dashboard":
dashboards = append(dashboards, dashboardState{
Name: resource.Name,
ID: instance.Attributes.ID,
ETag: instance.Attributes.ETag,
})
}
}
}
return dashboards, nil
}
type checkDashboardsModifiedRemotely struct {
}
func (l *checkDashboardsModifiedRemotely) Name() string {
return "CheckDashboardsModifiedRemotely"
}
func (l *checkDashboardsModifiedRemotely) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
// This mutator is relevant only if the bundle includes dashboards.
if len(b.Config.Resources.Dashboards) == 0 {
return nil
}
// If the user has forced the deployment, skip this check.
if b.Config.Bundle.Force {
return nil
}
dashboards, err := collectDashboardsFromState(ctx, b)
if err != nil {
return diag.FromErr(err)
}
var diags diag.Diagnostics
for _, dashboard := range dashboards {
// Skip dashboards that are not defined in the bundle.
// These will be destroyed upon deployment.
if _, ok := b.Config.Resources.Dashboards[dashboard.Name]; !ok {
continue
}
path := dyn.MustPathFromString(fmt.Sprintf("resources.dashboards.%s", dashboard.Name))
loc := b.Config.GetLocation(path.String())
actual, err := b.WorkspaceClient().Lakeview.GetByDashboardId(ctx, dashboard.ID)
if err != nil {
diags = diags.Append(diag.Diagnostic{
Severity: diag.Error,
Summary: fmt.Sprintf("failed to get dashboard %q", dashboard.Name),
Detail: err.Error(),
Paths: []dyn.Path{path},
Locations: []dyn.Location{loc},
})
continue
}
// If the ETag is the same, the dashboard has not been modified.
if actual.Etag == dashboard.ETag {
continue
}
diags = diags.Append(diag.Diagnostic{
Severity: diag.Error,
Summary: fmt.Sprintf("dashboard %q has been modified remotely", dashboard.Name),
Detail: "" +
"This dashboard has been modified remotely since the last bundle deployment.\n" +
"These modifications are untracked and will be overwritten on deploy.\n" +
"\n" +
"Make sure that the local dashboard definition matches what you intend to deploy\n" +
"before proceeding with the deployment.\n" +
"\n" +
"Run `databricks bundle deploy --force` to bypass this error." +
"",
Paths: []dyn.Path{path},
Locations: []dyn.Location{loc},
})
}
return diags
}
func CheckDashboardsModifiedRemotely() *checkDashboardsModifiedRemotely {
return &checkDashboardsModifiedRemotely{}
}

View File

@@ -0,0 +1,191 @@
package terraform
import (
"context"
"fmt"
"path/filepath"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/internal/testutil"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/dashboards"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func mockDashboardBundle(t *testing.T) *bundle.Bundle {
dir := t.TempDir()
b := &bundle.Bundle{
BundleRootPath: dir,
Config: config.Root{
Bundle: config.Bundle{
Target: "test",
},
Resources: config.Resources{
Dashboards: map[string]*resources.Dashboard{
"dash1": {
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
DisplayName: "My Special Dashboard",
},
},
},
},
},
}
return b
}
func TestCheckDashboardsModifiedRemotely_NoDashboards(t *testing.T) {
dir := t.TempDir()
b := &bundle.Bundle{
BundleRootPath: dir,
Config: config.Root{
Bundle: config.Bundle{
Target: "test",
},
Resources: config.Resources{},
},
}
diags := bundle.Apply(context.Background(), b, CheckDashboardsModifiedRemotely())
assert.Empty(t, diags)
}
func TestCheckDashboardsModifiedRemotely_FirstDeployment(t *testing.T) {
b := mockDashboardBundle(t)
diags := bundle.Apply(context.Background(), b, CheckDashboardsModifiedRemotely())
assert.Empty(t, diags)
}
func TestCheckDashboardsModifiedRemotely_ExistingStateNoChange(t *testing.T) {
ctx := context.Background()
b := mockDashboardBundle(t)
writeFakeDashboardState(t, ctx, b)
// Mock the call to the API.
m := mocks.NewMockWorkspaceClient(t)
dashboardsAPI := m.GetMockLakeviewAPI()
dashboardsAPI.EXPECT().
GetByDashboardId(mock.Anything, "id1").
Return(&dashboards.Dashboard{
DisplayName: "My Special Dashboard",
Etag: "1000",
}, nil).
Once()
b.SetWorkpaceClient(m.WorkspaceClient)
// No changes, so no diags.
diags := bundle.Apply(ctx, b, CheckDashboardsModifiedRemotely())
assert.Empty(t, diags)
}
func TestCheckDashboardsModifiedRemotely_ExistingStateChange(t *testing.T) {
ctx := context.Background()
b := mockDashboardBundle(t)
writeFakeDashboardState(t, ctx, b)
// Mock the call to the API.
m := mocks.NewMockWorkspaceClient(t)
dashboardsAPI := m.GetMockLakeviewAPI()
dashboardsAPI.EXPECT().
GetByDashboardId(mock.Anything, "id1").
Return(&dashboards.Dashboard{
DisplayName: "My Special Dashboard",
Etag: "1234",
}, nil).
Once()
b.SetWorkpaceClient(m.WorkspaceClient)
// The dashboard has changed, so expect an error.
diags := bundle.Apply(ctx, b, CheckDashboardsModifiedRemotely())
if assert.Len(t, diags, 1) {
assert.Equal(t, diag.Error, diags[0].Severity)
assert.Equal(t, `dashboard "dash1" has been modified remotely`, diags[0].Summary)
}
}
func TestCheckDashboardsModifiedRemotely_ExistingStateFailureToGet(t *testing.T) {
ctx := context.Background()
b := mockDashboardBundle(t)
writeFakeDashboardState(t, ctx, b)
// Mock the call to the API.
m := mocks.NewMockWorkspaceClient(t)
dashboardsAPI := m.GetMockLakeviewAPI()
dashboardsAPI.EXPECT().
GetByDashboardId(mock.Anything, "id1").
Return(nil, fmt.Errorf("failure")).
Once()
b.SetWorkpaceClient(m.WorkspaceClient)
// Unable to get the dashboard, so expect an error.
diags := bundle.Apply(ctx, b, CheckDashboardsModifiedRemotely())
if assert.Len(t, diags, 1) {
assert.Equal(t, diag.Error, diags[0].Severity)
assert.Equal(t, `failed to get dashboard "dash1"`, diags[0].Summary)
}
}
func writeFakeDashboardState(t *testing.T, ctx context.Context, b *bundle.Bundle) {
tfDir, err := Dir(ctx, b)
require.NoError(t, err)
// Write fake state file.
testutil.WriteFile(t, `
{
"version": 4,
"terraform_version": "1.5.5",
"resources": [
{
"mode": "managed",
"type": "databricks_dashboard",
"name": "dash1",
"instances": [
{
"schema_version": 0,
"attributes": {
"etag": "1000",
"id": "id1"
}
}
]
},
{
"mode": "managed",
"type": "databricks_job",
"name": "job",
"instances": [
{
"schema_version": 0,
"attributes": {
"id": "1234"
}
}
]
},
{
"mode": "managed",
"type": "databricks_dashboard",
"name": "dash2",
"instances": [
{
"schema_version": 0,
"attributes": {
"etag": "1001",
"id": "id2"
}
}
]
}
]
}
`, filepath.Join(tfDir, TerraformStateFileName))
}

View File

@ -2,9 +2,7 @@ package terraform
import (
"context"
"encoding/json"
"fmt"
"sort"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
@ -14,244 +12,6 @@ import (
tfjson "github.com/hashicorp/terraform-json"
)
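// conv converts between types by round-tripping the value through JSON.
// Errors are deliberately ignored; this helper only backs the legacy
// conversion path that is kept around for tests.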
func conv(from any, to any) {
buf, _ := json.Marshal(from)
json.Unmarshal(buf, &to)
}
func convPermissions(acl []resources.Permission) *schema.ResourcePermissions {
if len(acl) == 0 {
return nil
}
resource := schema.ResourcePermissions{}
for _, ac := range acl {
resource.AccessControl = append(resource.AccessControl, convPermission(ac))
}
return &resource
}
func convPermission(ac resources.Permission) schema.ResourcePermissionsAccessControl {
dst := schema.ResourcePermissionsAccessControl{
PermissionLevel: ac.Level,
}
if ac.UserName != "" {
dst.UserName = ac.UserName
}
if ac.GroupName != "" {
dst.GroupName = ac.GroupName
}
if ac.ServicePrincipalName != "" {
dst.ServicePrincipalName = ac.ServicePrincipalName
}
return dst
}
func convGrants(acl []resources.Grant) *schema.ResourceGrants {
if len(acl) == 0 {
return nil
}
resource := schema.ResourceGrants{}
for _, ac := range acl {
resource.Grant = append(resource.Grant, schema.ResourceGrantsGrant{
Privileges: ac.Privileges,
Principal: ac.Principal,
})
}
return &resource
}
// BundleToTerraform converts resources in a bundle configuration
// to the equivalent Terraform JSON representation.
//
// Note: This function is an older implementation of the conversion logic. It is
// no longer used in any code paths. It is kept around to be used in tests.
// New resources do not need to modify this function and can instead define
// the conversion logic in the tfdyn package.
func BundleToTerraform(config *config.Root) *schema.Root {
tfroot := schema.NewRoot()
tfroot.Provider = schema.NewProviders()
tfroot.Resource = schema.NewResources()
noResources := true
for k, src := range config.Resources.Jobs {
noResources = false
var dst schema.ResourceJob
conv(src, &dst)
if src.JobSettings != nil {
sort.Slice(src.JobSettings.Tasks, func(i, j int) bool {
return src.JobSettings.Tasks[i].TaskKey < src.JobSettings.Tasks[j].TaskKey
})
for _, v := range src.Tasks {
var t schema.ResourceJobTask
conv(v, &t)
for _, v_ := range v.Libraries {
var l schema.ResourceJobTaskLibrary
conv(v_, &l)
t.Library = append(t.Library, l)
}
// Convert for_each_task libraries
if v.ForEachTask != nil {
for _, v_ := range v.ForEachTask.Task.Libraries {
var l schema.ResourceJobTaskForEachTaskTaskLibrary
conv(v_, &l)
t.ForEachTask.Task.Library = append(t.ForEachTask.Task.Library, l)
}
}
dst.Task = append(dst.Task, t)
}
for _, v := range src.JobClusters {
var t schema.ResourceJobJobCluster
conv(v, &t)
dst.JobCluster = append(dst.JobCluster, t)
}
// Unblock downstream work. To be addressed more generally later.
if git := src.GitSource; git != nil {
dst.GitSource = &schema.ResourceJobGitSource{
Url: git.GitUrl,
Branch: git.GitBranch,
Commit: git.GitCommit,
Provider: string(git.GitProvider),
Tag: git.GitTag,
}
}
for _, v := range src.Parameters {
var t schema.ResourceJobParameter
conv(v, &t)
dst.Parameter = append(dst.Parameter, t)
}
}
tfroot.Resource.Job[k] = &dst
// Configure permissions for this resource.
if rp := convPermissions(src.Permissions); rp != nil {
rp.JobId = fmt.Sprintf("${databricks_job.%s.id}", k)
tfroot.Resource.Permissions["job_"+k] = rp
}
}
for k, src := range config.Resources.Pipelines {
noResources = false
var dst schema.ResourcePipeline
conv(src, &dst)
if src.PipelineSpec != nil {
for _, v := range src.Libraries {
var l schema.ResourcePipelineLibrary
conv(v, &l)
dst.Library = append(dst.Library, l)
}
for _, v := range src.Clusters {
var l schema.ResourcePipelineCluster
conv(v, &l)
dst.Cluster = append(dst.Cluster, l)
}
for _, v := range src.Notifications {
var l schema.ResourcePipelineNotification
conv(v, &l)
dst.Notification = append(dst.Notification, l)
}
}
tfroot.Resource.Pipeline[k] = &dst
// Configure permissions for this resource.
if rp := convPermissions(src.Permissions); rp != nil {
rp.PipelineId = fmt.Sprintf("${databricks_pipeline.%s.id}", k)
tfroot.Resource.Permissions["pipeline_"+k] = rp
}
}
for k, src := range config.Resources.Models {
noResources = false
var dst schema.ResourceMlflowModel
conv(src, &dst)
tfroot.Resource.MlflowModel[k] = &dst
// Configure permissions for this resource.
if rp := convPermissions(src.Permissions); rp != nil {
rp.RegisteredModelId = fmt.Sprintf("${databricks_mlflow_model.%s.registered_model_id}", k)
tfroot.Resource.Permissions["mlflow_model_"+k] = rp
}
}
for k, src := range config.Resources.Experiments {
noResources = false
var dst schema.ResourceMlflowExperiment
conv(src, &dst)
tfroot.Resource.MlflowExperiment[k] = &dst
// Configure permissions for this resource.
if rp := convPermissions(src.Permissions); rp != nil {
rp.ExperimentId = fmt.Sprintf("${databricks_mlflow_experiment.%s.id}", k)
tfroot.Resource.Permissions["mlflow_experiment_"+k] = rp
}
}
for k, src := range config.Resources.ModelServingEndpoints {
noResources = false
var dst schema.ResourceModelServing
conv(src, &dst)
tfroot.Resource.ModelServing[k] = &dst
// Configure permissions for this resource.
if rp := convPermissions(src.Permissions); rp != nil {
rp.ServingEndpointId = fmt.Sprintf("${databricks_model_serving.%s.serving_endpoint_id}", k)
tfroot.Resource.Permissions["model_serving_"+k] = rp
}
}
for k, src := range config.Resources.RegisteredModels {
noResources = false
var dst schema.ResourceRegisteredModel
conv(src, &dst)
tfroot.Resource.RegisteredModel[k] = &dst
// Configure permissions for this resource.
if rp := convGrants(src.Grants); rp != nil {
rp.Function = fmt.Sprintf("${databricks_registered_model.%s.id}", k)
tfroot.Resource.Grants["registered_model_"+k] = rp
}
}
for k, src := range config.Resources.QualityMonitors {
noResources = false
var dst schema.ResourceQualityMonitor
conv(src, &dst)
tfroot.Resource.QualityMonitor[k] = &dst
}
for k, src := range config.Resources.Clusters {
noResources = false
var dst schema.ResourceCluster
conv(src, &dst)
tfroot.Resource.Cluster[k] = &dst
}
// We explicitly set "resource" to nil to omit it from a JSON encoding.
// This is required because the terraform CLI requires >= 1 resources defined
// if the "resource" property is used in a .tf.json file.
if noResources {
tfroot.Resource = nil
}
return tfroot
}
// BundleToTerraformWithDynValue converts resources in a bundle configuration
// to the equivalent Terraform JSON representation.
func BundleToTerraformWithDynValue(ctx context.Context, root dyn.Value) (*schema.Root, error) {
@ -426,6 +186,16 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error {
}
cur.ID = instance.Attributes.ID
config.Resources.Clusters[resource.Name] = cur
case "databricks_dashboard":
if config.Resources.Dashboards == nil {
config.Resources.Dashboards = make(map[string]*resources.Dashboard)
}
cur := config.Resources.Dashboards[resource.Name]
if cur == nil {
cur = &resources.Dashboard{ModifiedStatus: resources.ModifiedStatusDeleted}
}
cur.ID = instance.Attributes.ID
config.Resources.Dashboards[resource.Name] = cur
case "databricks_permissions":
case "databricks_grants":
// Ignore; no need to pull these back into the configuration.
@ -485,6 +255,11 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error {
src.ModifiedStatus = resources.ModifiedStatusCreated
}
}
for _, src := range config.Resources.Dashboards {
if src.ModifiedStatus == "" && src.ID == "" {
src.ModifiedStatus = resources.ModifiedStatusCreated
}
}
return nil
}

View File

@ -2,7 +2,6 @@ package terraform
import (
"context"
"encoding/json"
"reflect"
"testing"
@ -13,6 +12,7 @@ import (
"github.com/databricks/cli/libs/dyn/convert"
"github.com/databricks/databricks-sdk-go/service/catalog"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/databricks/databricks-sdk-go/service/dashboards"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/databricks/databricks-sdk-go/service/ml"
"github.com/databricks/databricks-sdk-go/service/pipelines"
@ -21,6 +21,27 @@ import (
"github.com/stretchr/testify/require"
)
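// produceTerraformConfiguration converts a bundle configuration into its
// Terraform JSON representation via the dynamic value code path.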
func produceTerraformConfiguration(t *testing.T, config config.Root) *schema.Root {
vin, err := convert.FromTyped(config, dyn.NilValue)
require.NoError(t, err)
out, err := BundleToTerraformWithDynValue(context.Background(), vin)
require.NoError(t, err)
return out
}
func convertToResourceStruct[T any](t *testing.T, resource *T, data any) {
require.NotNil(t, resource)
require.NotNil(t, data)
// Convert data to a dyn.Value.
vin, err := convert.FromTyped(data, dyn.NilValue)
require.NoError(t, err)
// Convert the dyn.Value to a struct.
err = convert.ToTyped(resource, vin)
require.NoError(t, err)
}
func TestBundleToTerraformJob(t *testing.T) {
var src = resources.Job{
JobSettings: &jobs.JobSettings{
@ -58,8 +79,9 @@ func TestBundleToTerraformJob(t *testing.T) {
},
}
out := BundleToTerraform(&config)
resource := out.Resource.Job["my_job"].(*schema.ResourceJob)
var resource schema.ResourceJob
out := produceTerraformConfiguration(t, config)
convertToResourceStruct(t, &resource, out.Resource.Job["my_job"])
assert.Equal(t, "my job", resource.Name)
assert.Len(t, resource.JobCluster, 1)
@ -68,8 +90,6 @@ func TestBundleToTerraformJob(t *testing.T) {
assert.Equal(t, "param1", resource.Parameter[0].Name)
assert.Equal(t, "param2", resource.Parameter[1].Name)
assert.Nil(t, out.Data)
bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformJobPermissions(t *testing.T) {
@ -90,15 +110,14 @@ func TestBundleToTerraformJobPermissions(t *testing.T) {
},
}
out := BundleToTerraform(&config)
resource := out.Resource.Permissions["job_my_job"].(*schema.ResourcePermissions)
var resource schema.ResourcePermissions
out := produceTerraformConfiguration(t, config)
convertToResourceStruct(t, &resource, out.Resource.Permissions["job_my_job"])
assert.NotEmpty(t, resource.JobId)
assert.Len(t, resource.AccessControl, 1)
assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName)
assert.Equal(t, "CAN_VIEW", resource.AccessControl[0].PermissionLevel)
bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformJobTaskLibraries(t *testing.T) {
@ -128,15 +147,14 @@ func TestBundleToTerraformJobTaskLibraries(t *testing.T) {
},
}
out := BundleToTerraform(&config)
resource := out.Resource.Job["my_job"].(*schema.ResourceJob)
var resource schema.ResourceJob
out := produceTerraformConfiguration(t, config)
convertToResourceStruct(t, &resource, out.Resource.Job["my_job"])
assert.Equal(t, "my job", resource.Name)
require.Len(t, resource.Task, 1)
require.Len(t, resource.Task[0].Library, 1)
assert.Equal(t, "mlflow", resource.Task[0].Library[0].Pypi.Package)
bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformForEachTaskLibraries(t *testing.T) {
@ -172,15 +190,14 @@ func TestBundleToTerraformForEachTaskLibraries(t *testing.T) {
},
}
out := BundleToTerraform(&config)
resource := out.Resource.Job["my_job"].(*schema.ResourceJob)
var resource schema.ResourceJob
out := produceTerraformConfiguration(t, config)
convertToResourceStruct(t, &resource, out.Resource.Job["my_job"])
assert.Equal(t, "my job", resource.Name)
require.Len(t, resource.Task, 1)
require.Len(t, resource.Task[0].ForEachTask.Task.Library, 1)
assert.Equal(t, "mlflow", resource.Task[0].ForEachTask.Task.Library[0].Pypi.Package)
bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformPipeline(t *testing.T) {
@ -230,8 +247,9 @@ func TestBundleToTerraformPipeline(t *testing.T) {
},
}
out := BundleToTerraform(&config)
resource := out.Resource.Pipeline["my_pipeline"].(*schema.ResourcePipeline)
var resource schema.ResourcePipeline
out := produceTerraformConfiguration(t, config)
convertToResourceStruct(t, &resource, out.Resource.Pipeline["my_pipeline"])
assert.Equal(t, "my pipeline", resource.Name)
assert.Len(t, resource.Library, 2)
@ -241,8 +259,6 @@ func TestBundleToTerraformPipeline(t *testing.T) {
assert.Equal(t, resource.Notification[1].Alerts, []string{"on-update-failure", "on-flow-failure"})
assert.Equal(t, resource.Notification[1].EmailRecipients, []string{"jane@doe.com", "john@doe.com"})
assert.Nil(t, out.Data)
bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformPipelinePermissions(t *testing.T) {
@ -263,15 +279,14 @@ func TestBundleToTerraformPipelinePermissions(t *testing.T) {
},
}
out := BundleToTerraform(&config)
resource := out.Resource.Permissions["pipeline_my_pipeline"].(*schema.ResourcePermissions)
var resource schema.ResourcePermissions
out := produceTerraformConfiguration(t, config)
convertToResourceStruct(t, &resource, out.Resource.Permissions["pipeline_my_pipeline"])
assert.NotEmpty(t, resource.PipelineId)
assert.Len(t, resource.AccessControl, 1)
assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName)
assert.Equal(t, "CAN_VIEW", resource.AccessControl[0].PermissionLevel)
bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformModel(t *testing.T) {
@ -300,8 +315,9 @@ func TestBundleToTerraformModel(t *testing.T) {
},
}
out := BundleToTerraform(&config)
resource := out.Resource.MlflowModel["my_model"].(*schema.ResourceMlflowModel)
var resource schema.ResourceMlflowModel
out := produceTerraformConfiguration(t, config)
convertToResourceStruct(t, &resource, out.Resource.MlflowModel["my_model"])
assert.Equal(t, "name", resource.Name)
assert.Equal(t, "description", resource.Description)
@ -311,8 +327,6 @@ func TestBundleToTerraformModel(t *testing.T) {
assert.Equal(t, "k2", resource.Tags[1].Key)
assert.Equal(t, "v2", resource.Tags[1].Value)
assert.Nil(t, out.Data)
bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformModelPermissions(t *testing.T) {
@ -336,15 +350,14 @@ func TestBundleToTerraformModelPermissions(t *testing.T) {
},
}
out := BundleToTerraform(&config)
resource := out.Resource.Permissions["mlflow_model_my_model"].(*schema.ResourcePermissions)
var resource schema.ResourcePermissions
out := produceTerraformConfiguration(t, config)
convertToResourceStruct(t, &resource, out.Resource.Permissions["mlflow_model_my_model"])
assert.NotEmpty(t, resource.RegisteredModelId)
assert.Len(t, resource.AccessControl, 1)
assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName)
assert.Equal(t, "CAN_READ", resource.AccessControl[0].PermissionLevel)
bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformExperiment(t *testing.T) {
@ -362,13 +375,12 @@ func TestBundleToTerraformExperiment(t *testing.T) {
},
}
out := BundleToTerraform(&config)
resource := out.Resource.MlflowExperiment["my_experiment"].(*schema.ResourceMlflowExperiment)
var resource schema.ResourceMlflowExperiment
out := produceTerraformConfiguration(t, config)
convertToResourceStruct(t, &resource, out.Resource.MlflowExperiment["my_experiment"])
assert.Equal(t, "name", resource.Name)
assert.Nil(t, out.Data)
bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformExperimentPermissions(t *testing.T) {
@ -392,15 +404,14 @@ func TestBundleToTerraformExperimentPermissions(t *testing.T) {
},
}
out := BundleToTerraform(&config)
resource := out.Resource.Permissions["mlflow_experiment_my_experiment"].(*schema.ResourcePermissions)
var resource schema.ResourcePermissions
out := produceTerraformConfiguration(t, config)
convertToResourceStruct(t, &resource, out.Resource.Permissions["mlflow_experiment_my_experiment"])
assert.NotEmpty(t, resource.ExperimentId)
assert.Len(t, resource.AccessControl, 1)
assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName)
assert.Equal(t, "CAN_READ", resource.AccessControl[0].PermissionLevel)
bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformModelServing(t *testing.T) {
@ -436,8 +447,9 @@ func TestBundleToTerraformModelServing(t *testing.T) {
},
}
out := BundleToTerraform(&config)
resource := out.Resource.ModelServing["my_model_serving_endpoint"].(*schema.ResourceModelServing)
var resource schema.ResourceModelServing
out := produceTerraformConfiguration(t, config)
convertToResourceStruct(t, &resource, out.Resource.ModelServing["my_model_serving_endpoint"])
assert.Equal(t, "name", resource.Name)
assert.Equal(t, "model_name", resource.Config.ServedModels[0].ModelName)
@ -447,8 +459,6 @@ func TestBundleToTerraformModelServing(t *testing.T) {
assert.Equal(t, "model_name-1", resource.Config.TrafficConfig.Routes[0].ServedModelName)
assert.Equal(t, 100, resource.Config.TrafficConfig.Routes[0].TrafficPercentage)
assert.Nil(t, out.Data)
bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformModelServingPermissions(t *testing.T) {
@ -490,15 +500,14 @@ func TestBundleToTerraformModelServingPermissions(t *testing.T) {
},
}
out := BundleToTerraform(&config)
resource := out.Resource.Permissions["model_serving_my_model_serving_endpoint"].(*schema.ResourcePermissions)
var resource schema.ResourcePermissions
out := produceTerraformConfiguration(t, config)
convertToResourceStruct(t, &resource, out.Resource.Permissions["model_serving_my_model_serving_endpoint"])
assert.NotEmpty(t, resource.ServingEndpointId)
assert.Len(t, resource.AccessControl, 1)
assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName)
assert.Equal(t, "CAN_VIEW", resource.AccessControl[0].PermissionLevel)
bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformRegisteredModel(t *testing.T) {
@ -519,16 +528,15 @@ func TestBundleToTerraformRegisteredModel(t *testing.T) {
},
}
out := BundleToTerraform(&config)
resource := out.Resource.RegisteredModel["my_registered_model"].(*schema.ResourceRegisteredModel)
var resource schema.ResourceRegisteredModel
out := produceTerraformConfiguration(t, config)
convertToResourceStruct(t, &resource, out.Resource.RegisteredModel["my_registered_model"])
assert.Equal(t, "name", resource.Name)
assert.Equal(t, "catalog", resource.CatalogName)
assert.Equal(t, "schema", resource.SchemaName)
assert.Equal(t, "comment", resource.Comment)
assert.Nil(t, out.Data)
bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformRegisteredModelGrants(t *testing.T) {
@ -554,15 +562,14 @@ func TestBundleToTerraformRegisteredModelGrants(t *testing.T) {
},
}
out := BundleToTerraform(&config)
resource := out.Resource.Grants["registered_model_my_registered_model"].(*schema.ResourceGrants)
var resource schema.ResourceGrants
out := produceTerraformConfiguration(t, config)
convertToResourceStruct(t, &resource, out.Resource.Grants["registered_model_my_registered_model"])
assert.NotEmpty(t, resource.Function)
assert.Len(t, resource.Grant, 1)
assert.Equal(t, "jane@doe.com", resource.Grant[0].Principal)
assert.Equal(t, "EXECUTE", resource.Grant[0].Privileges[0])
bundleToTerraformEquivalenceTest(t, &config)
}
func TestBundleToTerraformDeletedResources(t *testing.T) {
@ -679,6 +686,14 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) {
{Attributes: stateInstanceAttributes{ID: "1"}},
},
},
{
Type: "databricks_dashboard",
Mode: "managed",
Name: "test_dashboard",
Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "1"}},
},
},
},
}
err := TerraformToBundle(&tfState, &config)
@ -714,6 +729,9 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) {
assert.Equal(t, "1", config.Resources.Clusters["test_cluster"].ID)
assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Clusters["test_cluster"].ModifiedStatus)
assert.Equal(t, "1", config.Resources.Dashboards["test_dashboard"].ID)
assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Dashboards["test_dashboard"].ModifiedStatus)
AssertFullResourceCoverage(t, &config)
}
@ -790,6 +808,13 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
},
},
},
Dashboards: map[string]*resources.Dashboard{
"test_dashboard": {
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
DisplayName: "test_dashboard",
},
},
},
},
}
var tfState = resourcesState{
@ -828,6 +853,9 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
assert.Equal(t, "", config.Resources.Clusters["test_cluster"].ID)
assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Clusters["test_cluster"].ModifiedStatus)
assert.Equal(t, "", config.Resources.Dashboards["test_dashboard"].ID)
assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Dashboards["test_dashboard"].ModifiedStatus)
AssertFullResourceCoverage(t, &config)
}
@ -954,6 +982,18 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
},
},
},
Dashboards: map[string]*resources.Dashboard{
"test_dashboard": {
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
DisplayName: "test_dashboard",
},
},
"test_dashboard_new": {
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
DisplayName: "test_dashboard_new",
},
},
},
},
}
var tfState = resourcesState{
@ -1118,6 +1158,22 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
{Attributes: stateInstanceAttributes{ID: "2"}},
},
},
{
Type: "databricks_dashboard",
Mode: "managed",
Name: "test_dashboard",
Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "1"}},
},
},
{
Type: "databricks_dashboard",
Mode: "managed",
Name: "test_dashboard_old",
Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "2"}},
},
},
},
}
err := TerraformToBundle(&tfState, &config)
@ -1193,6 +1249,13 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
assert.Equal(t, "", config.Resources.Clusters["test_cluster_new"].ID)
assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Clusters["test_cluster_new"].ModifiedStatus)
assert.Equal(t, "1", config.Resources.Dashboards["test_dashboard"].ID)
assert.Equal(t, "", config.Resources.Dashboards["test_dashboard"].ModifiedStatus)
assert.Equal(t, "2", config.Resources.Dashboards["test_dashboard_old"].ID)
assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Dashboards["test_dashboard_old"].ModifiedStatus)
assert.Equal(t, "", config.Resources.Dashboards["test_dashboard_new"].ID)
assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Dashboards["test_dashboard_new"].ModifiedStatus)
AssertFullResourceCoverage(t, &config)
}
@ -1210,25 +1273,3 @@ func AssertFullResourceCoverage(t *testing.T, config *config.Root) {
}
}
}
func assertEqualTerraformRoot(t *testing.T, a, b *schema.Root) {
ba, err := json.Marshal(a)
require.NoError(t, err)
bb, err := json.Marshal(b)
require.NoError(t, err)
assert.JSONEq(t, string(ba), string(bb))
}
func bundleToTerraformEquivalenceTest(t *testing.T, config *config.Root) {
t.Run("dyn equivalence", func(t *testing.T) {
tf1 := BundleToTerraform(config)
vin, err := convert.FromTyped(config, dyn.NilValue)
require.NoError(t, err)
tf2, err := BundleToTerraformWithDynValue(context.Background(), vin)
require.NoError(t, err)
// Compare roots
assertEqualTerraformRoot(t, tf1, tf2)
})
}

View File

@ -62,6 +62,8 @@ func (m *interpolateMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.D
path = dyn.NewPath(dyn.Key("databricks_volume")).Append(path[2:]...)
case dyn.Key("clusters"):
path = dyn.NewPath(dyn.Key("databricks_cluster")).Append(path[2:]...)
case dyn.Key("dashboards"):
path = dyn.NewPath(dyn.Key("databricks_dashboard")).Append(path[2:]...)
default:
// Trigger "key not found" for unknown resource types.
return dyn.GetByPath(root, path)

View File

@ -33,6 +33,7 @@ func TestInterpolate(t *testing.T) {
"other_schema": "${resources.schemas.other_schema.id}",
"other_volume": "${resources.volumes.other_volume.id}",
"other_cluster": "${resources.clusters.other_cluster.id}",
"other_dashboard": "${resources.dashboards.other_dashboard.id}",
},
Tasks: []jobs.Task{
{
@ -71,6 +72,7 @@ func TestInterpolate(t *testing.T) {
assert.Equal(t, "${databricks_schema.other_schema.id}", j.Tags["other_schema"])
assert.Equal(t, "${databricks_volume.other_volume.id}", j.Tags["other_volume"])
assert.Equal(t, "${databricks_cluster.other_cluster.id}", j.Tags["other_cluster"])
assert.Equal(t, "${databricks_dashboard.other_dashboard.id}", j.Tags["other_dashboard"])
m := b.Config.Resources.Models["my_model"]
assert.Equal(t, "my_model", m.Model.Name)

View File

@ -0,0 +1,109 @@
package tfdyn
import (
"context"
"encoding/json"
"fmt"
"github.com/databricks/cli/bundle/internal/tf/schema"
"github.com/databricks/cli/libs/dyn"
"github.com/databricks/cli/libs/dyn/convert"
"github.com/databricks/cli/libs/log"
)
const (
filePathFieldName = "file_path"
serializedDashboardFieldName = "serialized_dashboard"
)
// Marshal "serialized_dashboard" as JSON if it is set in the input but not in the output.
func marshalSerializedDashboard(vin dyn.Value, vout dyn.Value) (dyn.Value, error) {
// Skip if the "serialized_dashboard" field is already set.
if v := vout.Get(serializedDashboardFieldName); v.IsValid() {
return vout, nil
}
// Skip if the "serialized_dashboard" field on the input is not set.
v := vin.Get(serializedDashboardFieldName)
if !v.IsValid() {
return vout, nil
}
// Marshal the "serialized_dashboard" field as JSON.
data, err := json.Marshal(v.AsAny())
if err != nil {
return dyn.InvalidValue, fmt.Errorf("failed to marshal serialized_dashboard: %w", err)
}
// Set the "serialized_dashboard" field on the output.
return dyn.Set(vout, serializedDashboardFieldName, dyn.V(string(data)))
}
func convertDashboardResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) {
var err error
// Normalize the output value to the target schema.
vout, diags := convert.Normalize(schema.ResourceDashboard{}, vin)
for _, diag := range diags {
log.Debugf(ctx, "dashboard normalization diagnostic: %s", diag.Summary)
}
// Include "serialized_dashboard" field if "file_path" is set.
// Note: the Terraform resource supports "file_path" natively, but its
// change detection mechanism doesn't work as expected at the time of writing (Sep 30).
if path, ok := vout.Get(filePathFieldName).AsString(); ok {
vout, err = dyn.Set(vout, serializedDashboardFieldName, dyn.V(fmt.Sprintf("${file(%q)}", path)))
if err != nil {
return dyn.InvalidValue, fmt.Errorf("failed to set serialized_dashboard: %w", err)
}
// Drop the "file_path" field. It is mutually exclusive with "serialized_dashboard".
vout, err = dyn.Walk(vout, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
switch len(p) {
case 0:
return v, nil
case 1:
if p[0] == dyn.Key(filePathFieldName) {
return v, dyn.ErrDrop
}
}
// Skip everything else.
return v, dyn.ErrSkip
})
if err != nil {
return dyn.InvalidValue, fmt.Errorf("failed to drop file_path: %w", err)
}
}
// Marshal "serialized_dashboard" as JSON if it is set in the input but not in the output.
vout, err = marshalSerializedDashboard(vin, vout)
if err != nil {
return dyn.InvalidValue, err
}
return vout, nil
}
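// For example (illustrative): an input with file_path "some/path" is emitted
// with serialized_dashboard set to ${file("some/path")} and the file_path
// field dropped, while a serialized_dashboard given as a map is marshaled to
// its compact JSON string representation.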
type dashboardConverter struct{}
func (dashboardConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error {
vout, err := convertDashboardResource(ctx, vin)
if err != nil {
return err
}
// Add the converted resource to the output.
out.Dashboard[key] = vout.AsAny()
// Configure permissions for this resource.
if permissions := convertPermissionsResource(ctx, vin); permissions != nil {
permissions.DashboardId = fmt.Sprintf("${databricks_dashboard.%s.id}", key)
out.Permissions["dashboard_"+key] = permissions
}
return nil
}
func init() {
registerConverter("dashboards", dashboardConverter{})
}

View File

@ -0,0 +1,153 @@
package tfdyn
import (
"context"
"testing"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/bundle/internal/tf/schema"
"github.com/databricks/cli/libs/dyn"
"github.com/databricks/cli/libs/dyn/convert"
"github.com/databricks/databricks-sdk-go/service/dashboards"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestConvertDashboard(t *testing.T) {
var src = resources.Dashboard{
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
DisplayName: "my dashboard",
WarehouseId: "f00dcafe",
ParentPath: "/some/path",
},
EmbedCredentials: true,
Permissions: []resources.Permission{
{
Level: "CAN_VIEW",
UserName: "jane@doe.com",
},
},
}
vin, err := convert.FromTyped(src, dyn.NilValue)
require.NoError(t, err)
ctx := context.Background()
out := schema.NewResources()
err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
require.NoError(t, err)
// Assert equality on the job
assert.Equal(t, map[string]any{
"display_name": "my dashboard",
"warehouse_id": "f00dcafe",
"parent_path": "/some/path",
"embed_credentials": true,
}, out.Dashboard["my_dashboard"])
// Assert equality on the permissions
assert.Equal(t, &schema.ResourcePermissions{
DashboardId: "${databricks_dashboard.my_dashboard.id}",
AccessControl: []schema.ResourcePermissionsAccessControl{
{
PermissionLevel: "CAN_VIEW",
UserName: "jane@doe.com",
},
},
}, out.Permissions["dashboard_my_dashboard"])
}
func TestConvertDashboardFilePath(t *testing.T) {
var src = resources.Dashboard{
FilePath: "some/path",
}
vin, err := convert.FromTyped(src, dyn.NilValue)
require.NoError(t, err)
ctx := context.Background()
out := schema.NewResources()
err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
require.NoError(t, err)
// Assert that the "serialized_dashboard" is included.
assert.Subset(t, out.Dashboard["my_dashboard"], map[string]any{
"serialized_dashboard": "${file(\"some/path\")}",
})
// Assert that the "file_path" doesn't carry over.
assert.NotSubset(t, out.Dashboard["my_dashboard"], map[string]any{
"file_path": "some/path",
})
}
func TestConvertDashboardFilePathQuoted(t *testing.T) {
var src = resources.Dashboard{
FilePath: `C:\foo\bar\baz\dashboard.lvdash.json`,
}
vin, err := convert.FromTyped(src, dyn.NilValue)
require.NoError(t, err)
ctx := context.Background()
out := schema.NewResources()
err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
require.NoError(t, err)
// Assert that the "serialized_dashboard" is included.
assert.Subset(t, out.Dashboard["my_dashboard"], map[string]any{
"serialized_dashboard": `${file("C:\\foo\\bar\\baz\\dashboard.lvdash.json")}`,
})
// Assert that the "file_path" doesn't carry over.
assert.NotSubset(t, out.Dashboard["my_dashboard"], map[string]any{
"file_path": `C:\foo\bar\baz\dashboard.lvdash.json`,
})
}
func TestConvertDashboardSerializedDashboardString(t *testing.T) {
var src = resources.Dashboard{
SerializedDashboard: `{ "json": true }`,
}
vin, err := convert.FromTyped(src, dyn.NilValue)
require.NoError(t, err)
ctx := context.Background()
out := schema.NewResources()
err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
require.NoError(t, err)
// Assert that the "serialized_dashboard" is included.
assert.Subset(t, out.Dashboard["my_dashboard"], map[string]any{
"serialized_dashboard": `{ "json": true }`,
})
}
func TestConvertDashboardSerializedDashboardAny(t *testing.T) {
var src = resources.Dashboard{
SerializedDashboard: map[string]any{
"pages": []map[string]any{
{
"displayName": "New Page",
"layout": []map[string]any{},
},
},
},
}
vin, err := convert.FromTyped(src, dyn.NilValue)
require.NoError(t, err)
ctx := context.Background()
out := schema.NewResources()
err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
require.NoError(t, err)
// Assert that the "serialized_dashboard" is included.
assert.Subset(t, out.Dashboard["my_dashboard"], map[string]any{
"serialized_dashboard": `{"pages":[{"displayName":"New Page","layout":[]}]}`,
})
}

View File

@ -13,7 +13,7 @@ import (
// Partial representation of the Terraform state file format.
// We are only interested in the global version and serial numbers,
// plus resource types, names, modes, and ids.
// plus resource types, names, modes, IDs, and ETags (for dashboards).
type resourcesState struct {
Version int `json:"version"`
Resources []stateResource `json:"resources"`
@ -33,7 +33,8 @@ type stateResourceInstance struct {
}
type stateInstanceAttributes struct {
ID string `json:"id"`
ID string `json:"id"`
ETag string `json:"etag,omitempty"`
}
func ParseResourcesState(ctx context.Context, b *bundle.Bundle) (*resourcesState, error) {

View File

@ -0,0 +1,20 @@
package bundletest
import (
"context"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/dyn"
"github.com/stretchr/testify/require"
)
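// Mutate applies the given function to the bundle's dynamic configuration
// value from within a mutator, failing the test if the mutation or its
// application returns an error.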
func Mutate(t *testing.T, b *bundle.Bundle, f func(v dyn.Value) (dyn.Value, error)) {
diags := bundle.ApplyFunc(context.Background(), b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
err := b.Config.Mutate(f)
require.NoError(t, err)
return nil
})
require.NoError(t, diags.Error())
}

View File

@ -1,3 +1,3 @@
package schema
const ProviderVersion = "1.53.0"
const ProviderVersion = "1.54.0"

View File

@ -0,0 +1,15 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
package schema
type DataSourceNotificationDestinationsNotificationDestinations struct {
DestinationType string `json:"destination_type,omitempty"`
DisplayName string `json:"display_name,omitempty"`
Id string `json:"id,omitempty"`
}
type DataSourceNotificationDestinations struct {
DisplayNameContains string `json:"display_name_contains,omitempty"`
Type string `json:"type,omitempty"`
NotificationDestinations []DataSourceNotificationDestinationsNotificationDestinations `json:"notification_destinations,omitempty"`
}

View File

@ -0,0 +1,32 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
package schema
type DataSourceRegisteredModelModelInfoAliases struct {
AliasName string `json:"alias_name,omitempty"`
VersionNum int `json:"version_num,omitempty"`
}
type DataSourceRegisteredModelModelInfo struct {
BrowseOnly bool `json:"browse_only,omitempty"`
CatalogName string `json:"catalog_name,omitempty"`
Comment string `json:"comment,omitempty"`
CreatedAt int `json:"created_at,omitempty"`
CreatedBy string `json:"created_by,omitempty"`
FullName string `json:"full_name,omitempty"`
MetastoreId string `json:"metastore_id,omitempty"`
Name string `json:"name,omitempty"`
Owner string `json:"owner,omitempty"`
SchemaName string `json:"schema_name,omitempty"`
StorageLocation string `json:"storage_location,omitempty"`
UpdatedAt int `json:"updated_at,omitempty"`
UpdatedBy string `json:"updated_by,omitempty"`
Aliases []DataSourceRegisteredModelModelInfoAliases `json:"aliases,omitempty"`
}
type DataSourceRegisteredModel struct {
FullName string `json:"full_name"`
IncludeAliases bool `json:"include_aliases,omitempty"`
IncludeBrowse bool `json:"include_browse,omitempty"`
ModelInfo []DataSourceRegisteredModelModelInfo `json:"model_info,omitempty"`
}

View File

@ -36,7 +36,9 @@ type DataSources struct {
NodeType map[string]any `json:"databricks_node_type,omitempty"`
Notebook map[string]any `json:"databricks_notebook,omitempty"`
NotebookPaths map[string]any `json:"databricks_notebook_paths,omitempty"`
NotificationDestinations map[string]any `json:"databricks_notification_destinations,omitempty"`
Pipelines map[string]any `json:"databricks_pipelines,omitempty"`
RegisteredModel map[string]any `json:"databricks_registered_model,omitempty"`
Schema map[string]any `json:"databricks_schema,omitempty"`
Schemas map[string]any `json:"databricks_schemas,omitempty"`
ServicePrincipal map[string]any `json:"databricks_service_principal,omitempty"`
@ -92,7 +94,9 @@ func NewDataSources() *DataSources {
NodeType: make(map[string]any),
Notebook: make(map[string]any),
NotebookPaths: make(map[string]any),
NotificationDestinations: make(map[string]any),
Pipelines: make(map[string]any),
RegisteredModel: make(map[string]any),
Schema: make(map[string]any),
Schemas: make(map[string]any),
ServicePrincipal: make(map[string]any),

View File

@ -1448,6 +1448,7 @@ type ResourceJobWebhookNotifications struct {
type ResourceJob struct {
AlwaysRunning bool `json:"always_running,omitempty"`
BudgetPolicyId string `json:"budget_policy_id,omitempty"`
ControlRunState bool `json:"control_run_state,omitempty"`
Description string `json:"description,omitempty"`
EditMode string `json:"edit_mode,omitempty"`

View File

@ -19,9 +19,10 @@ type ResourceOnlineTableSpec struct {
}
type ResourceOnlineTable struct {
Id string `json:"id,omitempty"`
Name string `json:"name"`
Status []any `json:"status,omitempty"`
TableServingUrl string `json:"table_serving_url,omitempty"`
Spec *ResourceOnlineTableSpec `json:"spec,omitempty"`
Id string `json:"id,omitempty"`
Name string `json:"name"`
Status []any `json:"status,omitempty"`
TableServingUrl string `json:"table_serving_url,omitempty"`
UnityCatalogProvisioningState string `json:"unity_catalog_provisioning_state,omitempty"`
Spec *ResourceOnlineTableSpec `json:"spec,omitempty"`
}

View File

@ -142,10 +142,26 @@ type ResourcePipelineGatewayDefinition struct {
GatewayStorageSchema string `json:"gateway_storage_schema,omitempty"`
}
type ResourcePipelineIngestionDefinitionObjectsReportTableConfiguration struct {
PrimaryKeys []string `json:"primary_keys,omitempty"`
SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"`
ScdType string `json:"scd_type,omitempty"`
SequenceBy []string `json:"sequence_by,omitempty"`
}
type ResourcePipelineIngestionDefinitionObjectsReport struct {
DestinationCatalog string `json:"destination_catalog,omitempty"`
DestinationSchema string `json:"destination_schema,omitempty"`
DestinationTable string `json:"destination_table,omitempty"`
SourceUrl string `json:"source_url,omitempty"`
TableConfiguration *ResourcePipelineIngestionDefinitionObjectsReportTableConfiguration `json:"table_configuration,omitempty"`
}
type ResourcePipelineIngestionDefinitionObjectsSchemaTableConfiguration struct {
PrimaryKeys []string `json:"primary_keys,omitempty"`
SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"`
ScdType string `json:"scd_type,omitempty"`
SequenceBy []string `json:"sequence_by,omitempty"`
}
type ResourcePipelineIngestionDefinitionObjectsSchema struct {
@ -160,6 +176,7 @@ type ResourcePipelineIngestionDefinitionObjectsTableTableConfiguration struct {
PrimaryKeys []string `json:"primary_keys,omitempty"`
SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"`
ScdType string `json:"scd_type,omitempty"`
SequenceBy []string `json:"sequence_by,omitempty"`
}
type ResourcePipelineIngestionDefinitionObjectsTable struct {
@ -173,6 +190,7 @@ type ResourcePipelineIngestionDefinitionObjectsTable struct {
}
type ResourcePipelineIngestionDefinitionObjects struct {
Report *ResourcePipelineIngestionDefinitionObjectsReport `json:"report,omitempty"`
Schema *ResourcePipelineIngestionDefinitionObjectsSchema `json:"schema,omitempty"`
Table *ResourcePipelineIngestionDefinitionObjectsTable `json:"table,omitempty"`
}
@ -181,6 +199,7 @@ type ResourcePipelineIngestionDefinitionTableConfiguration struct {
PrimaryKeys []string `json:"primary_keys,omitempty"`
SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"`
ScdType string `json:"scd_type,omitempty"`
SequenceBy []string `json:"sequence_by,omitempty"`
}
type ResourcePipelineIngestionDefinition struct {

View File

@ -21,7 +21,7 @@ type Root struct {
const ProviderHost = "registry.terraform.io"
const ProviderSource = "databricks/databricks"
const ProviderVersion = "1.53.0"
const ProviderVersion = "1.54.0"
func NewRoot() *Root {
return &Root{

View File

@ -57,6 +57,12 @@ func IsLibraryLocal(dep string) bool {
}
}
// If the dependency starts with --, it's a pip flag option, which is a valid
// entry for environment dependencies but not a local path.
if containsPipFlag(dep) {
return false
}
// If the dependency is a requirements file, it's not a valid local path
if strings.HasPrefix(dep, "-r") {
return false
@ -70,6 +76,11 @@ func IsLibraryLocal(dep string) bool {
return IsLocalPath(dep)
}
func containsPipFlag(input string) bool {
re := regexp.MustCompile(`--[a-zA-Z0-9-]+`)
return re.MatchString(input)
}
// ^[a-zA-Z0-9\-_]+: Matches the package name, allowing alphanumeric characters, dashes (-), and underscores (_).
// \[.*\])?: Optionally matches any extras specified in square brackets, e.g., [security].
// ((==|!=|<=|>=|~=|>|<)\d+(\.\d+){0,2}(\.\*)?): Optionally matches version specifiers, supporting various operators (==, !=, etc.) followed by a version number (e.g., 2.25.1).
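// Illustrative classifications implied by the rules above (assumed inputs):
//
//	IsLibraryLocal("--extra-index-url https://example.com") // false: pip flag
//	IsLibraryLocal("-r requirements.txt")                    // false: requirements file
//	IsLibraryLocal("requests==2.25.1")                       // false: PyPI package spec
//	IsLibraryLocal("./dist/pkg-0.1-py3-none-any.whl")        // true: local path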

View File

@ -36,3 +36,12 @@ func IsWorkspaceLibrary(library *compute.Library) bool {
return IsWorkspacePath(path)
}
// IsVolumesPath returns true if the specified path indicates that
// it refers to a Unity Catalog volume (i.e. it starts with "/Volumes/").
func IsVolumesPath(path string) bool {
return strings.HasPrefix(path, "/Volumes/")
}
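// IsWorkspaceSharedPath returns true if the specified path is under the
// workspace's /Workspace/Shared/ folder.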
func IsWorkspaceSharedPath(path string) bool {
return strings.HasPrefix(path, "/Workspace/Shared/")
}

View File

@ -31,3 +31,13 @@ func TestIsWorkspaceLibrary(t *testing.T) {
// Empty.
assert.False(t, IsWorkspaceLibrary(&compute.Library{}))
}
func TestIsVolumesPath(t *testing.T) {
// Absolute paths with particular prefixes.
assert.True(t, IsVolumesPath("/Volumes/path/to/package"))
// Relative paths.
assert.False(t, IsVolumesPath("myfile.txt"))
assert.False(t, IsVolumesPath("./myfile.txt"))
assert.False(t, IsVolumesPath("../myfile.txt"))
}

View File

@ -39,6 +39,10 @@ var levelsMap = map[string](map[string]string){
CAN_VIEW: "CAN_VIEW",
CAN_RUN: "CAN_QUERY",
},
"dashboards": {
CAN_MANAGE: "CAN_MANAGE",
CAN_VIEW: "CAN_READ",
},
}
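// Note: dashboards use CAN_READ where other resources use CAN_VIEW, hence
// the mapping above.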
type bundlePermissions struct{}

View File

@ -0,0 +1,52 @@
package permissions
import (
"context"
"fmt"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/libraries"
"github.com/databricks/cli/libs/diag"
)
type validateSharedRootPermissions struct {
}
func ValidateSharedRootPermissions() bundle.Mutator {
return &validateSharedRootPermissions{}
}
func (*validateSharedRootPermissions) Name() string {
return "ValidateSharedRootPermissions"
}
func (*validateSharedRootPermissions) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
if libraries.IsWorkspaceSharedPath(b.Config.Workspace.RootPath) {
return isUsersGroupPermissionSet(b)
}
return nil
}
// isUsersGroupPermissionSet checks that the top-level permissions set for the bundle contains `group_name: users` with the CAN_MANAGE permission.
func isUsersGroupPermissionSet(b *bundle.Bundle) diag.Diagnostics {
var diags diag.Diagnostics
allUsers := false
for _, p := range b.Config.Permissions {
if p.GroupName == "users" && p.Level == CAN_MANAGE {
allUsers = true
break
}
}
if !allUsers {
diags = diags.Append(diag.Diagnostic{
Severity: diag.Warning,
Summary: fmt.Sprintf("the bundle root path %s is writable by all workspace users", b.Config.Workspace.RootPath),
Detail: "The bundle is configured to use /Workspace/Shared, which will give read/write access to all users. If this is intentional, add CAN_MANAGE for 'group_name: users' permission to your bundle configuration. If the deployment should be restricted, move it to a restricted folder such as /Workspace/Users/<username or principal name>.",
})
}
return diags
}

View File

@ -0,0 +1,66 @@
package permissions
import (
"context"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/stretchr/testify/require"
)
func TestValidateSharedRootPermissionsForShared(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Workspace: config.Workspace{
RootPath: "/Workspace/Shared/foo/bar",
},
Permissions: []resources.Permission{
{Level: CAN_MANAGE, GroupName: "users"},
},
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job_1": {JobSettings: &jobs.JobSettings{Name: "job_1"}},
"job_2": {JobSettings: &jobs.JobSettings{Name: "job_2"}},
},
},
},
}
m := mocks.NewMockWorkspaceClient(t)
b.SetWorkpaceClient(m.WorkspaceClient)
diags := bundle.Apply(context.Background(), b, bundle.Seq(ValidateSharedRootPermissions()))
require.Empty(t, diags)
}
func TestValidateSharedRootPermissionsForSharedError(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Workspace: config.Workspace{
RootPath: "/Workspace/Shared/foo/bar",
},
Permissions: []resources.Permission{
{Level: CAN_MANAGE, UserName: "foo@bar.com"},
},
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job_1": {JobSettings: &jobs.JobSettings{Name: "job_1"}},
"job_2": {JobSettings: &jobs.JobSettings{Name: "job_2"}},
},
},
},
}
m := mocks.NewMockWorkspaceClient(t)
b.SetWorkpaceClient(m.WorkspaceClient)
diags := bundle.Apply(context.Background(), b, bundle.Seq(ValidateSharedRootPermissions()))
require.Len(t, diags, 1)
require.Equal(t, "the bundle root path /Workspace/Shared/foo/bar is writable by all workspace users", diags[0].Summary)
require.Equal(t, diag.Warning, diags[0].Severity)
}

View File

@ -0,0 +1,89 @@
package permissions
import (
"fmt"
"strings"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/databricks-sdk-go/service/workspace"
)
type WorkspacePathPermissions struct {
Path string
Permissions []resources.Permission
}
func ObjectAclToResourcePermissions(path string, acl []workspace.WorkspaceObjectAccessControlResponse) *WorkspacePathPermissions {
permissions := make([]resources.Permission, 0)
for _, a := range acl {
// Skip the admin group because it's added to all resources by default.
if a.GroupName == "admins" {
continue
}
for _, pl := range a.AllPermissions {
permissions = append(permissions, resources.Permission{
Level: convertWorkspaceObjectPermissionLevel(pl.PermissionLevel),
GroupName: a.GroupName,
UserName: a.UserName,
ServicePrincipalName: a.ServicePrincipalName,
})
}
}
return &WorkspacePathPermissions{Permissions: permissions, Path: path}
}
func (p WorkspacePathPermissions) Compare(perms []resources.Permission) diag.Diagnostics {
var diags diag.Diagnostics
// Check the permissions in the workspace and see if they are all set in the bundle.
ok, missing := containsAll(p.Permissions, perms)
if !ok {
diags = diags.Append(diag.Diagnostic{
Severity: diag.Warning,
Summary: "untracked permissions apply to target workspace path",
Detail: fmt.Sprintf("The following permissions apply to the workspace folder at %q but are not configured in the bundle:\n%s", p.Path, toString(missing)),
})
}
return diags
}
// containsAll checks whether every permission in permA is also present in permB, and returns the permissions from permA that are missing.
func containsAll(permA []resources.Permission, permB []resources.Permission) (bool, []resources.Permission) {
missing := make([]resources.Permission, 0)
for _, a := range permA {
found := false
for _, b := range permB {
if a == b {
found = true
break
}
}
if !found {
missing = append(missing, a)
}
}
return len(missing) == 0, missing
}
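// For example, if the workspace folder grants CAN_MANAGE to group "foo" but the
// bundle only configures permissions for user "jane@doe.com", Compare reports
// the group "foo" entry as missing and emits the untracked-permissions warning.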
// convertWorkspaceObjectPermissionLevel converts matching workspace object permission levels to bundle ones.
// If there is no matching permission level, it returns the permission level as-is, for example, CAN_EDIT.
func convertWorkspaceObjectPermissionLevel(level workspace.WorkspaceObjectPermissionLevel) string {
switch level {
case workspace.WorkspaceObjectPermissionLevelCanRead:
return CAN_VIEW
default:
return string(level)
}
}
func toString(p []resources.Permission) string {
var sb strings.Builder
for _, perm := range p {
sb.WriteString(fmt.Sprintf("- %s\n", perm.String()))
}
return sb.String()
}

View File

@ -0,0 +1,121 @@
package permissions
import (
"testing"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/databricks-sdk-go/service/workspace"
"github.com/stretchr/testify/require"
)
func TestWorkspacePathPermissionsCompare(t *testing.T) {
testCases := []struct {
perms []resources.Permission
acl []workspace.WorkspaceObjectAccessControlResponse
expected diag.Diagnostics
}{
{
perms: []resources.Permission{
{Level: CAN_MANAGE, UserName: "foo@bar.com"},
},
acl: []workspace.WorkspaceObjectAccessControlResponse{
{
UserName: "foo@bar.com",
AllPermissions: []workspace.WorkspaceObjectPermission{
{PermissionLevel: "CAN_MANAGE"},
},
},
},
expected: nil,
},
{
perms: []resources.Permission{
{Level: CAN_MANAGE, UserName: "foo@bar.com"},
},
acl: []workspace.WorkspaceObjectAccessControlResponse{
{
UserName: "foo@bar.com",
AllPermissions: []workspace.WorkspaceObjectPermission{
{PermissionLevel: "CAN_MANAGE"},
},
},
{
GroupName: "admins",
AllPermissions: []workspace.WorkspaceObjectPermission{
{PermissionLevel: "CAN_MANAGE"},
},
},
},
expected: nil,
},
{
perms: []resources.Permission{
{Level: CAN_VIEW, UserName: "foo@bar.com"},
{Level: CAN_MANAGE, ServicePrincipalName: "sp.com"},
},
acl: []workspace.WorkspaceObjectAccessControlResponse{
{
UserName: "foo@bar.com",
AllPermissions: []workspace.WorkspaceObjectPermission{
{PermissionLevel: "CAN_READ"},
},
},
},
expected: nil,
},
{
perms: []resources.Permission{
{Level: CAN_MANAGE, UserName: "foo@bar.com"},
},
acl: []workspace.WorkspaceObjectAccessControlResponse{
{
UserName: "foo@bar.com",
AllPermissions: []workspace.WorkspaceObjectPermission{
{PermissionLevel: "CAN_MANAGE"},
},
},
{
GroupName: "foo",
AllPermissions: []workspace.WorkspaceObjectPermission{
{PermissionLevel: "CAN_MANAGE"},
},
},
},
expected: diag.Diagnostics{
{
Severity: diag.Warning,
Summary: "untracked permissions apply to target workspace path",
Detail: "The following permissions apply to the workspace folder at \"path\" but are not configured in the bundle:\n- level: CAN_MANAGE, group_name: foo\n",
},
},
},
{
perms: []resources.Permission{
{Level: CAN_MANAGE, UserName: "foo@bar.com"},
},
acl: []workspace.WorkspaceObjectAccessControlResponse{
{
UserName: "foo2@bar.com",
AllPermissions: []workspace.WorkspaceObjectPermission{
{PermissionLevel: "CAN_MANAGE"},
},
},
},
expected: diag.Diagnostics{
{
Severity: diag.Warning,
Summary: "untracked permissions apply to target workspace path",
Detail: "The following permissions apply to the workspace folder at \"path\" but are not configured in the bundle:\n- level: CAN_MANAGE, user_name: foo2@bar.com\n",
},
},
},
}
for _, tc := range testCases {
wp := ObjectAclToResourcePermissions("path", tc.acl)
diags := wp.Compare(tc.perms)
require.Equal(t, tc.expected, diags)
}
}

View File

@ -16,6 +16,10 @@ func ApplyWorkspaceRootPermissions() bundle.Mutator {
return &workspaceRootPermissions{}
}
func (*workspaceRootPermissions) Name() string {
return "ApplyWorkspaceRootPermissions"
}
// Apply implements bundle.Mutator.
func (*workspaceRootPermissions) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
err := giveAccessForWorkspaceRoot(ctx, b)
@ -26,15 +30,11 @@ func (*workspaceRootPermissions) Apply(ctx context.Context, b *bundle.Bundle) di
return nil
}
func (*workspaceRootPermissions) Name() string {
return "ApplyWorkspaceRootPermissions"
}
func giveAccessForWorkspaceRoot(ctx context.Context, b *bundle.Bundle) error {
permissions := make([]workspace.WorkspaceObjectAccessControlRequest, 0)
for _, p := range b.Config.Permissions {
level, err := getWorkspaceObjectPermissionLevel(p.Level)
level, err := GetWorkspaceObjectPermissionLevel(p.Level)
if err != nil {
return err
}
@ -65,7 +65,7 @@ func giveAccessForWorkspaceRoot(ctx context.Context, b *bundle.Bundle) error {
return err
}
func getWorkspaceObjectPermissionLevel(bundlePermission string) (workspace.WorkspaceObjectPermissionLevel, error) {
func GetWorkspaceObjectPermissionLevel(bundlePermission string) (workspace.WorkspaceObjectPermissionLevel, error) {
switch bundlePermission {
case CAN_MANAGE:
return workspace.WorkspaceObjectPermissionLevelCanManage, nil

View File

@ -69,6 +69,6 @@ func TestApplyWorkspaceRootPermissions(t *testing.T) {
WorkspaceObjectType: "directories",
}).Return(nil, nil)
diags := bundle.Apply(context.Background(), b, ApplyWorkspaceRootPermissions())
require.NoError(t, diags.Error())
diags := bundle.Apply(context.Background(), b, bundle.Seq(ValidateSharedRootPermissions(), ApplyWorkspaceRootPermissions()))
require.Empty(t, diags)
}

View File

@ -146,6 +146,7 @@ func Deploy(outputHandler sync.OutputHandler) bundle.Mutator {
bundle.Defer(
bundle.Seq(
terraform.StatePull(),
terraform.CheckDashboardsModifiedRemotely(),
deploy.StatePull(),
mutator.ValidateGitDetails(),
artifacts.CleanUp(),

View File

@ -66,6 +66,7 @@ func Initialize() bundle.Mutator {
permissions.PermissionDiagnostics(),
mutator.SetRunAs(),
mutator.OverrideCompute(),
mutator.ConfigureDashboardDefaults(),
mutator.ProcessTargetMode(),
mutator.ApplyPresets(),
mutator.DefaultQueueing(),
@ -76,8 +77,11 @@ func Initialize() bundle.Mutator {
mutator.TranslatePaths(),
trampoline.WrapperWarning(),
permissions.ValidateSharedRootPermissions(),
permissions.ApplyBundlePermissions(),
permissions.FilterCurrentUser(),
metadata.AnnotateJobs(),
metadata.AnnotatePipelines(),
terraform.Initialize(),

View File

@ -1,9 +1,11 @@
package render
import (
"context"
"fmt"
"io"
"path/filepath"
"sort"
"strings"
"text/template"
@ -29,7 +31,7 @@ var renderFuncMap = template.FuncMap{
},
}
const summaryTemplate = `{{- if .Name -}}
const summaryHeaderTemplate = `{{- if .Name -}}
Name: {{ .Name | bold }}
{{- if .Target }}
Target: {{ .Target | bold }}
@ -46,12 +48,30 @@ Workspace:
Path: {{ .Path | bold }}
{{- end }}
{{- end }}
{{ end -}}`
{{ end -}}
{{ .Trailer }}
`
const resourcesTemplate = `Resources:
{{- range . }}
{{ .GroupName }}:
{{- range .Resources }}
{{ .Key | bold }}:
Name: {{ .Name }}
URL: {{ if .URL }}{{ .URL | cyan }}{{ else }}{{ "(not deployed)" | cyan }}{{ end }}
{{- end }}
{{- end }}
`
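// Rendered, the template above produces output along these lines (illustrative):
//
//	Resources:
//	  Jobs:
//	    my_job:
//	      Name: my job
//	      URL:  https://<host>/jobs/<id> (or "(not deployed)" when no URL is set)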
type ResourceGroup struct {
GroupName string
Resources []ResourceInfo
}
type ResourceInfo struct {
Key string
Name string
URL string
}
func pluralize(n int, singular, plural string) string {
if n == 1 {
return fmt.Sprintf("%d %s", n, singular)
@ -74,20 +94,20 @@ func buildTrailer(diags diag.Diagnostics) string {
case len(parts) >= 3:
first := strings.Join(parts[:len(parts)-1], ", ")
last := parts[len(parts)-1]
return fmt.Sprintf("Found %s, and %s", first, last)
return fmt.Sprintf("Found %s, and %s\n", first, last)
case len(parts) == 2:
return fmt.Sprintf("Found %s and %s", parts[0], parts[1])
return fmt.Sprintf("Found %s and %s\n", parts[0], parts[1])
case len(parts) == 1:
return fmt.Sprintf("Found %s", parts[0])
return fmt.Sprintf("Found %s\n", parts[0])
default:
// No diagnostics to print.
return color.GreenString("Validation OK!")
return color.GreenString("Validation OK!\n")
}
}
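// For example, one error and two warnings produce "Found 1 error and 2 warnings\n",
// and an empty set of diagnostics produces a green "Validation OK!\n".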
func renderSummaryTemplate(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics) error {
func renderSummaryHeaderTemplate(out io.Writer, b *bundle.Bundle) error {
if b == nil {
return renderSummaryTemplate(out, &bundle.Bundle{}, diags)
return renderSummaryHeaderTemplate(out, &bundle.Bundle{})
}
var currentUser = &iam.User{}
@ -98,20 +118,19 @@ func renderSummaryTemplate(out io.Writer, b *bundle.Bundle, diags diag.Diagnosti
}
}
t := template.Must(template.New("summary").Funcs(renderFuncMap).Parse(summaryTemplate))
t := template.Must(template.New("summary").Funcs(renderFuncMap).Parse(summaryHeaderTemplate))
err := t.Execute(out, map[string]any{
"Name": b.Config.Bundle.Name,
"Target": b.Config.Bundle.Target,
"User": currentUser.UserName,
"Path": b.Config.Workspace.RootPath,
"Host": b.Config.Workspace.Host,
"Trailer": buildTrailer(diags),
"Name": b.Config.Bundle.Name,
"Target": b.Config.Bundle.Target,
"User": currentUser.UserName,
"Path": b.Config.Workspace.RootPath,
"Host": b.Config.Workspace.Host,
})
return err
}
func renderDiagnostics(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics) error {
func renderDiagnosticsOnly(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics) error {
for _, d := range diags {
for i := range d.Locations {
if b == nil {
@ -139,19 +158,73 @@ type RenderOptions struct {
RenderSummaryTable bool
}
// RenderTextOutput renders the diagnostics in a human-readable format.
func RenderTextOutput(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics, opts RenderOptions) error {
err := renderDiagnostics(out, b, diags)
// RenderDiagnostics renders the diagnostics in a human-readable format.
func RenderDiagnostics(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics, opts RenderOptions) error {
err := renderDiagnosticsOnly(out, b, diags)
if err != nil {
return fmt.Errorf("failed to render diagnostics: %w", err)
}
if opts.RenderSummaryTable {
err = renderSummaryTemplate(out, b, diags)
if err != nil {
return fmt.Errorf("failed to render summary: %w", err)
if b != nil {
err = renderSummaryHeaderTemplate(out, b)
if err != nil {
return fmt.Errorf("failed to render summary: %w", err)
}
io.WriteString(out, "\n")
}
trailer := buildTrailer(diags)
io.WriteString(out, trailer)
}
return nil
}
func RenderSummary(ctx context.Context, out io.Writer, b *bundle.Bundle) error {
if err := renderSummaryHeaderTemplate(out, b); err != nil {
return err
}
var resourceGroups []ResourceGroup
for _, group := range b.Config.Resources.AllResources() {
resources := make([]ResourceInfo, 0, len(group.Resources))
for key, resource := range group.Resources {
resources = append(resources, ResourceInfo{
Key: key,
Name: resource.GetName(),
URL: resource.GetURL(),
})
}
if len(resources) > 0 {
resourceGroups = append(resourceGroups, ResourceGroup{
GroupName: group.Description.PluralTitle,
Resources: resources,
})
}
}
if err := renderResourcesTemplate(out, resourceGroups); err != nil {
return fmt.Errorf("failed to render resources template: %w", err)
}
return nil
}
// renderResourcesTemplate sorts the resource groups and renders them using the resources template.
func renderResourcesTemplate(out io.Writer, resourceGroups []ResourceGroup) error {
// Sort everything to ensure consistent output
sort.Slice(resourceGroups, func(i, j int) bool {
return resourceGroups[i].GroupName < resourceGroups[j].GroupName
})
for _, group := range resourceGroups {
sort.Slice(group.Resources, func(i, j int) bool {
return group.Resources[i].Key < group.Resources[j].Key
})
}
t := template.Must(template.New("resources").Funcs(renderFuncMap).Parse(resourcesTemplate))
return t.Execute(out, resourceGroups)
}
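To make the new trailing-newline behavior concrete, here is a small in-package example sketch. It assumes buildTrailer tallies severities from the diagnostics it receives; that part of the function sits outside this hunk, so treat the exact counts as illustrative.

```go
package render

import (
	"fmt"

	"github.com/databricks/cli/libs/diag"
)

// Sketch: one error and two warnings produce a single-line trailer.
func ExampleBuildTrailer() {
	diags := diag.Diagnostics{
		{Severity: diag.Error, Summary: "e1"},
		{Severity: diag.Warning, Summary: "w1"},
		{Severity: diag.Warning, Summary: "w2"},
	}
	fmt.Print(buildTrailer(diags))
	// Output: Found 1 error and 2 warnings
}
```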

View File

@ -2,14 +2,21 @@ package render
import (
"bytes"
"context"
"io"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/dyn"
assert "github.com/databricks/cli/libs/dyn/dynassert"
"github.com/databricks/databricks-sdk-go/service/catalog"
"github.com/databricks/databricks-sdk-go/service/iam"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/databricks/databricks-sdk-go/service/pipelines"
"github.com/databricks/databricks-sdk-go/service/serving"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -326,7 +333,7 @@ func TestRenderTextOutput(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
writer := &bytes.Buffer{}
err := RenderTextOutput(writer, tc.bundle, tc.diags, tc.opts)
err := RenderDiagnostics(writer, tc.bundle, tc.diags, tc.opts)
require.NoError(t, err)
assert.Equal(t, tc.expected, writer.String())
@ -468,7 +475,7 @@ func TestRenderDiagnostics(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
writer := &bytes.Buffer{}
err := renderDiagnostics(writer, bundle, tc.diags)
err := renderDiagnosticsOnly(writer, bundle, tc.diags)
require.NoError(t, err)
assert.Equal(t, tc.expected, writer.String())
@ -479,8 +486,105 @@ func TestRenderDiagnostics(t *testing.T) {
func TestRenderSummaryTemplate_nilBundle(t *testing.T) {
writer := &bytes.Buffer{}
err := renderSummaryTemplate(writer, nil, nil)
err := renderSummaryHeaderTemplate(writer, nil)
require.NoError(t, err)
io.WriteString(writer, buildTrailer(nil))
assert.Equal(t, "Validation OK!\n", writer.String())
}
func TestRenderSummary(t *testing.T) {
ctx := context.Background()
// Create a mock bundle with various resources
b := &bundle.Bundle{
Config: config.Root{
Bundle: config.Bundle{
Name: "test-bundle",
Target: "test-target",
},
Workspace: config.Workspace{
Host: "https://mycompany.databricks.com/",
},
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job1": {
ID: "1",
URL: "https://url1",
JobSettings: &jobs.JobSettings{Name: "job1-name"},
},
"job2": {
ID: "2",
URL: "https://url2",
JobSettings: &jobs.JobSettings{Name: "job2-name"},
},
},
Pipelines: map[string]*resources.Pipeline{
"pipeline2": {
ID: "4",
// no URL
PipelineSpec: &pipelines.PipelineSpec{Name: "pipeline2-name"},
},
"pipeline1": {
ID: "3",
URL: "https://url3",
PipelineSpec: &pipelines.PipelineSpec{Name: "pipeline1-name"},
},
},
Schemas: map[string]*resources.Schema{
"schema1": {
ID: "catalog.schema",
CreateSchema: &catalog.CreateSchema{
Name: "schema",
},
// no URL
},
},
ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{
"endpoint1": {
ID: "7",
CreateServingEndpoint: &serving.CreateServingEndpoint{
Name: "my_serving_endpoint",
},
URL: "https://url4",
},
},
},
},
}
writer := &bytes.Buffer{}
err := RenderSummary(ctx, writer, b)
require.NoError(t, err)
expectedSummary := `Name: test-bundle
Target: test-target
Workspace:
Host: https://mycompany.databricks.com/
Resources:
Jobs:
job1:
Name: job1-name
URL: https://url1
job2:
Name: job2-name
URL: https://url2
Model Serving Endpoints:
endpoint1:
Name: my_serving_endpoint
URL: https://url4
Pipelines:
pipeline1:
Name: pipeline1-name
URL: https://url3
pipeline2:
Name: pipeline2-name
URL: (not deployed)
Schemas:
schema1:
Name: schema
URL: (not deployed)
`
assert.Equal(t, expectedSummary, writer.String())
}

View File

@ -0,0 +1,17 @@
package resources
import "github.com/databricks/cli/bundle"
// Completions returns the same as [References] except
// that every key maps directly to a single reference.
func Completions(b *bundle.Bundle, filters ...Filter) map[string]Reference {
out := make(map[string]Reference)
keyOnlyRefs, _ := References(b, filters...)
for k, refs := range keyOnlyRefs {
if len(refs) != 1 {
continue
}
out[k] = refs[0]
}
return out
}
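A hedged sketch of how a caller might consume Completions; the package name and helper are illustrative, not part of this change.

```go
package example

import (
	"fmt"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/resources"
)

// printCompletions lists every key that unambiguously identifies a single
// resource in the bundle, together with its type and display name.
func printCompletions(b *bundle.Bundle) {
	for key, ref := range resources.Completions(b) {
		fmt.Printf("%s\t%s: %s\n", key, ref.Description.SingularTitle, ref.Resource.GetName())
	}
}
```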

View File

@ -0,0 +1,58 @@
package resources
import (
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/stretchr/testify/assert"
)
func TestCompletions_SkipDuplicates(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {},
"bar": {},
},
Pipelines: map[string]*resources.Pipeline{
"foo": {},
},
},
},
}
// Test that this skips duplicates and only includes unambiguous completions.
out := Completions(b)
if assert.Len(t, out, 1) {
assert.Contains(t, out, "bar")
}
}
func TestCompletions_Filter(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {},
},
Pipelines: map[string]*resources.Pipeline{
"bar": {},
},
},
},
}
includeJobs := func(ref Reference) bool {
_, ok := ref.Resource.(*resources.Job)
return ok
}
// Test that this does not include the pipeline.
out := Completions(b, includeJobs)
if assert.Len(t, out, 1) {
assert.Contains(t, out, "foo")
}
}

View File

@ -0,0 +1,98 @@
package resources
import (
"fmt"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
)
// Reference is a reference to a resource.
// It includes the resource type description, and a reference to the resource itself.
type Reference struct {
// Key is the unique key of the resource, e.g. "my_job".
Key string
// KeyWithType is the unique key of the resource, including the resource type, e.g. "jobs.my_job".
KeyWithType string
// Description is the resource type description.
Description config.ResourceDescription
// Resource is the resource itself.
Resource config.ConfigResource
}
// Map is the core type for resource lookup and completion.
type Map map[string][]Reference
// Filter defines the function signature for filtering resources.
type Filter func(Reference) bool
// includeReference checks if the specified reference passes all filters.
// If the list of filters is empty, the reference is always included.
func includeReference(filters []Filter, ref Reference) bool {
for _, filter := range filters {
if !filter(ref) {
return false
}
}
return true
}
// References returns two maps of resource keys to slices of [Reference].
//
// The first map is indexed by the resource key only.
// The second map is indexed by the resource type name and its key.
//
// While the return types allow multiple resources to share the same key,
// this is confirmed not to happen in the [validate.UniqueResourceKeys] mutator.
func References(b *bundle.Bundle, filters ...Filter) (Map, Map) {
keyOnly := make(Map)
keyWithType := make(Map)
// Collect map of resource references indexed by their keys.
for _, group := range b.Config.Resources.AllResources() {
for k, v := range group.Resources {
ref := Reference{
Key: k,
KeyWithType: fmt.Sprintf("%s.%s", group.Description.PluralName, k),
Description: group.Description,
Resource: v,
}
// Skip resources that do not pass all filters.
if !includeReference(filters, ref) {
continue
}
keyOnly[ref.Key] = append(keyOnly[ref.Key], ref)
keyWithType[ref.KeyWithType] = append(keyWithType[ref.KeyWithType], ref)
}
}
return keyOnly, keyWithType
}
// Lookup returns the resource with the specified key.
// If the key maps to more than one resource, an error is returned.
// If the key does not map to any resource, an error is returned.
func Lookup(b *bundle.Bundle, key string, filters ...Filter) (Reference, error) {
keyOnlyRefs, keyWithTypeRefs := References(b, filters...)
refs, ok := keyOnlyRefs[key]
if !ok {
refs, ok = keyWithTypeRefs[key]
if !ok {
return Reference{}, fmt.Errorf("resource with key %q not found", key)
}
}
switch {
case len(refs) == 1:
return refs[0], nil
case len(refs) > 1:
return Reference{}, fmt.Errorf("multiple resources with key %q found", key)
default:
panic("unreachable")
}
}
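A matching sketch for Lookup, exercising both accepted key forms; the key "my_job" is made up for illustration.

```go
package example

import (
	"fmt"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/resources"
)

// resolveJob resolves the same hypothetical resource by bare key and by its
// <type>.<key> qualified form; both forms return the same reference.
func resolveJob(b *bundle.Bundle) error {
	for _, key := range []string{"my_job", "jobs.my_job"} {
		ref, err := resources.Lookup(b, key)
		if err != nil {
			return err
		}
		fmt.Printf("%s resolves to %s (%s)\n", key, ref.KeyWithType, ref.Resource.GetName())
	}
	return nil
}
```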

View File

@ -0,0 +1,117 @@
package resources
import (
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestLookup_EmptyBundle(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{},
},
}
_, err := Lookup(b, "foo")
require.Error(t, err)
assert.ErrorContains(t, err, "resource with key \"foo\" not found")
}
func TestLookup_NotFound(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {},
"bar": {},
},
},
},
}
_, err := Lookup(b, "qux")
require.Error(t, err)
assert.ErrorContains(t, err, `resource with key "qux" not found`)
}
func TestLookup_MultipleFound(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {},
},
Pipelines: map[string]*resources.Pipeline{
"foo": {},
},
},
},
}
_, err := Lookup(b, "foo")
require.Error(t, err)
assert.ErrorContains(t, err, `multiple resources with key "foo" found`)
}
func TestLookup_Nominal(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {
JobSettings: &jobs.JobSettings{
Name: "Foo job",
},
},
},
},
},
}
// Lookup by key only.
out, err := Lookup(b, "foo")
if assert.NoError(t, err) {
assert.Equal(t, "Foo job", out.Resource.GetName())
}
// Lookup by type and key.
out, err = Lookup(b, "jobs.foo")
if assert.NoError(t, err) {
assert.Equal(t, "Foo job", out.Resource.GetName())
}
}
func TestLookup_NominalWithFilters(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {},
},
Pipelines: map[string]*resources.Pipeline{
"bar": {},
},
},
},
}
includeJobs := func(ref Reference) bool {
_, ok := ref.Resource.(*resources.Job)
return ok
}
// This should succeed because the filter includes jobs.
_, err := Lookup(b, "foo", includeJobs)
require.NoError(t, err)
// This should fail because the filter excludes pipelines.
_, err = Lookup(b, "bar", includeJobs)
require.Error(t, err)
assert.ErrorContains(t, err, `resource with key "bar" not found`)
}

View File

@ -317,6 +317,29 @@ func (r *jobRunner) Cancel(ctx context.Context) error {
return errGroup.Wait()
}
func (r *jobRunner) Restart(ctx context.Context, opts *Options) (output.RunOutput, error) {
// We don't need to cancel existing runs if the job is continuous and unpaused.
// The /jobs/run-now API automatically cancels any existing runs before starting a new one.
//
// /jobs/run-now will not cancel existing runs if the job is continuous and paused.
// New job runs will be queued instead and will wait for existing runs to finish.
// In this case, we need to cancel the existing runs before starting a new one.
continuous := r.job.JobSettings.Continuous
if continuous != nil && continuous.PauseStatus == jobs.PauseStatusUnpaused {
return r.Run(ctx, opts)
}
s := cmdio.Spinner(ctx)
s <- "Cancelling all active job runs"
err := r.Cancel(ctx)
close(s)
if err != nil {
return nil, err
}
return r.Run(ctx, opts)
}
func (r *jobRunner) ParseArgs(args []string, opts *Options) error {
return r.posArgsHandler().ParseArgs(args, opts)
}

View File

@ -1,6 +1,7 @@
package run
import (
"bytes"
"context"
"testing"
"time"
@ -8,6 +9,8 @@ import (
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/flags"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/stretchr/testify/mock"
@ -126,3 +129,132 @@ func TestJobRunnerCancelWithNoActiveRuns(t *testing.T) {
err := runner.Cancel(context.Background())
require.NoError(t, err)
}
func TestJobRunnerRestart(t *testing.T) {
for _, jobSettings := range []*jobs.JobSettings{
{},
{
Continuous: &jobs.Continuous{
PauseStatus: jobs.PauseStatusPaused,
},
},
} {
job := &resources.Job{
ID: "123",
JobSettings: jobSettings,
}
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"test_job": job,
},
},
},
}
runner := jobRunner{key: "test", bundle: b, job: job}
m := mocks.NewMockWorkspaceClient(t)
b.SetWorkpaceClient(m.WorkspaceClient)
ctx := context.Background()
ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", ""))
ctx = cmdio.NewContext(ctx, cmdio.NewLogger(flags.ModeAppend))
jobApi := m.GetMockJobsAPI()
jobApi.EXPECT().ListRunsAll(mock.Anything, jobs.ListRunsRequest{
ActiveOnly: true,
JobId: 123,
}).Return([]jobs.BaseRun{
{RunId: 1},
{RunId: 2},
}, nil)
// Mock the runner cancelling existing job runs.
mockWait := &jobs.WaitGetRunJobTerminatedOrSkipped[struct{}]{
Poll: func(time time.Duration, f func(j *jobs.Run)) (*jobs.Run, error) {
return nil, nil
},
}
jobApi.EXPECT().CancelRun(mock.Anything, jobs.CancelRun{
RunId: 1,
}).Return(mockWait, nil)
jobApi.EXPECT().CancelRun(mock.Anything, jobs.CancelRun{
RunId: 2,
}).Return(mockWait, nil)
// Mock the runner triggering a job run
mockWaitForRun := &jobs.WaitGetRunJobTerminatedOrSkipped[jobs.RunNowResponse]{
Poll: func(d time.Duration, f func(*jobs.Run)) (*jobs.Run, error) {
return &jobs.Run{
State: &jobs.RunState{
ResultState: jobs.RunResultStateSuccess,
},
}, nil
},
}
jobApi.EXPECT().RunNow(mock.Anything, jobs.RunNow{
JobId: 123,
}).Return(mockWaitForRun, nil)
// Mock the runner getting the job output
jobApi.EXPECT().GetRun(mock.Anything, jobs.GetRunRequest{}).Return(&jobs.Run{}, nil)
_, err := runner.Restart(ctx, &Options{})
require.NoError(t, err)
}
}
func TestJobRunnerRestartForContinuousUnpausedJobs(t *testing.T) {
job := &resources.Job{
ID: "123",
JobSettings: &jobs.JobSettings{
Continuous: &jobs.Continuous{
PauseStatus: jobs.PauseStatusUnpaused,
},
},
}
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"test_job": job,
},
},
},
}
runner := jobRunner{key: "test", bundle: b, job: job}
m := mocks.NewMockWorkspaceClient(t)
b.SetWorkpaceClient(m.WorkspaceClient)
ctx := context.Background()
ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "..."))
ctx = cmdio.NewContext(ctx, cmdio.NewLogger(flags.ModeAppend))
jobApi := m.GetMockJobsAPI()
// The runner should not try and cancel existing job runs for unpaused continuous jobs.
jobApi.AssertNotCalled(t, "ListRunsAll")
jobApi.AssertNotCalled(t, "CancelRun")
// Mock the runner triggering a job run
mockWaitForRun := &jobs.WaitGetRunJobTerminatedOrSkipped[jobs.RunNowResponse]{
Poll: func(d time.Duration, f func(*jobs.Run)) (*jobs.Run, error) {
return &jobs.Run{
State: &jobs.RunState{
ResultState: jobs.RunResultStateSuccess,
},
}, nil
},
}
jobApi.EXPECT().RunNow(mock.Anything, jobs.RunNow{
JobId: 123,
}).Return(mockWaitForRun, nil)
// Mock the runner getting the job output
jobApi.EXPECT().GetRun(mock.Anything, jobs.GetRunRequest{}).Return(&jobs.Run{}, nil)
_, err := runner.Restart(ctx, &Options{})
require.NoError(t, err)
}

View File

@ -1,69 +0,0 @@
package run
import (
"fmt"
"github.com/databricks/cli/bundle"
"golang.org/x/exp/maps"
)
// RunnerLookup maps identifiers to a list of workloads that match that identifier.
// The list can have more than 1 entry if resources of different types use the
// same key. When this happens, the user should disambiguate between them.
type RunnerLookup map[string][]Runner
// ResourceKeys computes maps of runners for all resources in the bundle, indexed by resource key alone and by <type>.<key>.
func ResourceKeys(b *bundle.Bundle) (keyOnly RunnerLookup, keyWithType RunnerLookup) {
keyOnly = make(RunnerLookup)
keyWithType = make(RunnerLookup)
r := b.Config.Resources
for k, v := range r.Jobs {
kt := fmt.Sprintf("jobs.%s", k)
w := jobRunner{key: key(kt), bundle: b, job: v}
keyOnly[k] = append(keyOnly[k], &w)
keyWithType[kt] = append(keyWithType[kt], &w)
}
for k, v := range r.Pipelines {
kt := fmt.Sprintf("pipelines.%s", k)
w := pipelineRunner{key: key(kt), bundle: b, pipeline: v}
keyOnly[k] = append(keyOnly[k], &w)
keyWithType[kt] = append(keyWithType[kt], &w)
}
return
}
// ResourceCompletionMap returns a map of resource keys to their respective names.
func ResourceCompletionMap(b *bundle.Bundle) map[string]string {
out := make(map[string]string)
keyOnly, keyWithType := ResourceKeys(b)
// Keep track of resources we have seen by their fully qualified key.
seen := make(map[string]bool)
// First add resources that can be identified by key alone.
for k, v := range keyOnly {
// Invariant: len(v) >= 1. See [ResourceKeys].
if len(v) == 1 {
seen[v[0].Key()] = true
out[k] = v[0].Name()
}
}
// Then add resources that can only be identified by their type and key.
for k, v := range keyWithType {
// Invariant: len(v) == 1. See [ResourceKeys].
_, ok := seen[v[0].Key()]
if ok {
continue
}
out[k] = v[0].Name()
}
return out
}
// ResourceCompletions returns a list of keys that unambiguously reference resources in the bundle.
func ResourceCompletions(b *bundle.Bundle) []string {
return maps.Keys(ResourceCompletionMap(b))
}

View File

@ -1,25 +0,0 @@
package run
import (
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/stretchr/testify/assert"
)
func TestResourceCompletionsUnique(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {},
"bar": {},
},
},
},
}
assert.ElementsMatch(t, []string{"foo", "bar"}, ResourceCompletions(b))
}

View File

@ -183,6 +183,18 @@ func (r *pipelineRunner) Cancel(ctx context.Context) error {
return err
}
func (r *pipelineRunner) Restart(ctx context.Context, opts *Options) (output.RunOutput, error) {
s := cmdio.Spinner(ctx)
s <- "Cancelling the active pipeline update"
err := r.Cancel(ctx)
close(s)
if err != nil {
return nil, err
}
return r.Run(ctx, opts)
}
func (r *pipelineRunner) ParseArgs(args []string, opts *Options) error {
if len(args) == 0 {
return nil

View File

@ -1,6 +1,7 @@
package run
import (
"bytes"
"context"
"testing"
"time"
@ -8,8 +9,12 @@ import (
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/flags"
sdk_config "github.com/databricks/databricks-sdk-go/config"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/pipelines"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
@ -47,3 +52,68 @@ func TestPipelineRunnerCancel(t *testing.T) {
err := runner.Cancel(context.Background())
require.NoError(t, err)
}
func TestPipelineRunnerRestart(t *testing.T) {
pipeline := &resources.Pipeline{
ID: "123",
}
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Pipelines: map[string]*resources.Pipeline{
"test_pipeline": pipeline,
},
},
},
}
runner := pipelineRunner{key: "test", bundle: b, pipeline: pipeline}
m := mocks.NewMockWorkspaceClient(t)
m.WorkspaceClient.Config = &sdk_config.Config{
Host: "https://test.com",
}
b.SetWorkpaceClient(m.WorkspaceClient)
ctx := context.Background()
ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "..."))
ctx = cmdio.NewContext(ctx, cmdio.NewLogger(flags.ModeAppend))
mockWait := &pipelines.WaitGetPipelineIdle[struct{}]{
Poll: func(time.Duration, func(*pipelines.GetPipelineResponse)) (*pipelines.GetPipelineResponse, error) {
return nil, nil
},
}
pipelineApi := m.GetMockPipelinesAPI()
pipelineApi.EXPECT().Stop(mock.Anything, pipelines.StopRequest{
PipelineId: "123",
}).Return(mockWait, nil)
pipelineApi.EXPECT().GetByPipelineId(mock.Anything, "123").Return(&pipelines.GetPipelineResponse{}, nil)
// Mock runner starting a new update
pipelineApi.EXPECT().StartUpdate(mock.Anything, pipelines.StartUpdate{
PipelineId: "123",
}).Return(&pipelines.StartUpdateResponse{
UpdateId: "456",
}, nil)
// Mock runner polling for events
pipelineApi.EXPECT().ListPipelineEventsAll(mock.Anything, pipelines.ListPipelineEventsRequest{
Filter: `update_id = '456'`,
MaxResults: 100,
PipelineId: "123",
}).Return([]pipelines.PipelineEvent{}, nil)
// Mock runner polling for update status
pipelineApi.EXPECT().GetUpdateByPipelineIdAndUpdateId(mock.Anything, "123", "456").
Return(&pipelines.GetUpdateResponse{
Update: &pipelines.UpdateInfo{
State: pipelines.UpdateInfoStateCompleted,
},
}, nil)
_, err := runner.Restart(ctx, &Options{})
require.NoError(t, err)
}

View File

@ -3,9 +3,10 @@ package run
import (
"context"
"fmt"
"strings"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config/resources"
refs "github.com/databricks/cli/bundle/resources"
"github.com/databricks/cli/bundle/run/output"
)
@ -27,6 +28,10 @@ type Runner interface {
// Run the underlying workflow.
Run(ctx context.Context, opts *Options) (output.RunOutput, error)
// Restart the underlying workflow by cancelling any existing runs before
// starting a new one.
Restart(ctx context.Context, opts *Options) (output.RunOutput, error)
// Cancel the underlying workflow.
Cancel(ctx context.Context) error
@ -34,34 +39,24 @@ type Runner interface {
argsHandler
}
// Find locates a runner matching the specified argument.
//
// Its behavior is as follows:
// 1. Try to find a resource with <key> identical to the argument.
// 2. Try to find a resource with <type>.<key> identical to the argument.
//
// If an argument resolves to multiple resources, it returns an error.
func Find(b *bundle.Bundle, arg string) (Runner, error) {
keyOnly, keyWithType := ResourceKeys(b)
if len(keyWithType) == 0 {
return nil, fmt.Errorf("bundle defines no resources")
// IsRunnable is a filter that passes only runnable resources (jobs and pipelines).
func IsRunnable(ref refs.Reference) bool {
switch ref.Resource.(type) {
case *resources.Job, *resources.Pipeline:
return true
default:
return false
}
}
// ToRunner converts a resource reference to a runnable resource.
func ToRunner(b *bundle.Bundle, ref refs.Reference) (Runner, error) {
switch resource := ref.Resource.(type) {
case *resources.Job:
return &jobRunner{key: key(ref.KeyWithType), bundle: b, job: resource}, nil
case *resources.Pipeline:
return &pipelineRunner{key: key(ref.KeyWithType), bundle: b, pipeline: resource}, nil
default:
return nil, fmt.Errorf("unsupported resource type: %T", resource)
}
runners, ok := keyOnly[arg]
if !ok {
runners, ok = keyWithType[arg]
if !ok {
return nil, fmt.Errorf("no such resource: %s", arg)
}
}
if len(runners) != 1 {
var keys []string
for _, runner := range runners {
keys = append(keys, runner.Key())
}
return nil, fmt.Errorf("ambiguous: %s (can resolve to all of %s)", arg, strings.Join(keys, ", "))
}
return runners[0], nil
}

View File

@ -3,82 +3,14 @@ package run
import (
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
refs "github.com/databricks/cli/bundle/resources"
"github.com/stretchr/testify/assert"
)
func TestFindNoResources(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{},
},
}
_, err := Find(b, "foo")
assert.ErrorContains(t, err, "bundle defines no resources")
}
func TestFindSingleArg(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {},
},
},
},
}
_, err := Find(b, "foo")
assert.NoError(t, err)
}
func TestFindSingleArgNotFound(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {},
},
},
},
}
_, err := Find(b, "bar")
assert.ErrorContains(t, err, "no such resource: bar")
}
func TestFindSingleArgAmbiguous(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"key": {},
},
Pipelines: map[string]*resources.Pipeline{
"key": {},
},
},
},
}
_, err := Find(b, "key")
assert.ErrorContains(t, err, "ambiguous: ")
}
func TestFindSingleArgWithType(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"key": {},
},
},
},
}
_, err := Find(b, "jobs.key")
assert.NoError(t, err)
func TestRunner_IsRunnable(t *testing.T) {
assert.True(t, IsRunnable(refs.Reference{Resource: &resources.Job{}}))
assert.True(t, IsRunnable(refs.Reference{Resource: &resources.Pipeline{}}))
assert.False(t, IsRunnable(refs.Reference{Resource: &resources.MlflowModel{}}))
assert.False(t, IsRunnable(refs.Reference{Resource: &resources.MlflowExperiment{}}))
}

View File

@ -180,6 +180,48 @@
}
]
},
"resources.Dashboard": {
"anyOf": [
{
"type": "object",
"properties": {
"display_name": {
"description": "The display name of the dashboard.",
"$ref": "#/$defs/string"
},
"embed_credentials": {
"$ref": "#/$defs/bool"
},
"file_path": {
"$ref": "#/$defs/string"
},
"parent_path": {
"description": "The workspace path of the folder containing the dashboard. Includes leading slash and no\ntrailing slash.\nThis field is excluded in List Dashboards responses.",
"$ref": "#/$defs/string"
},
"permissions": {
"$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission"
},
"serialized_dashboard": {
"description": "The contents of the dashboard in serialized string form.\nThis field is excluded in List Dashboards responses.\nUse the [get dashboard API](https://docs.databricks.com/api/workspace/lakeview/get)\nto retrieve an example response, which includes the `serialized_dashboard` field.\nThis field provides the structure of the JSON string that represents the dashboard's\nlayout and components.",
"$ref": "#/$defs/interface"
},
"warehouse_id": {
"description": "The warehouse ID used to run the dashboard.",
"$ref": "#/$defs/string"
}
},
"additionalProperties": false,
"required": [
"display_name"
]
},
{
"type": "string",
"pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
}
]
},
"resources.Grant": {
"anyOf": [
{
@ -209,6 +251,10 @@
{
"type": "object",
"properties": {
"budget_policy_id": {
"description": "The id of the user specified budget policy to use for this job.\nIf not specified, a default budget policy may be applied when creating or modifying the job.\nSee `effective_budget_policy_id` for the budget policy used by this workload.",
"$ref": "#/$defs/string"
},
"continuous": {
"description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.Continuous"
@ -1050,6 +1096,9 @@
"clusters": {
"$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Cluster"
},
"dashboards": {
"$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Dashboard"
},
"experiments": {
"$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.MlflowExperiment"
},
@ -3901,6 +3950,10 @@
{
"type": "object",
"properties": {
"report": {
"description": "Select tables from a specific source report.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.ReportSpec"
},
"schema": {
"description": "Select tables from a specific source schema.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.SchemaSpec"
@ -4233,6 +4286,40 @@
}
]
},
"pipelines.ReportSpec": {
"anyOf": [
{
"type": "object",
"properties": {
"destination_catalog": {
"description": "Required. Destination catalog to store table.",
"$ref": "#/$defs/string"
},
"destination_schema": {
"description": "Required. Destination schema to store table.",
"$ref": "#/$defs/string"
},
"destination_table": {
"description": "Required. Destination table name. The pipeline fails if a table with that name already exists.",
"$ref": "#/$defs/string"
},
"source_url": {
"description": "Required. Report URL in the source system.",
"$ref": "#/$defs/string"
},
"table_configuration": {
"description": "Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the IngestionPipelineDefinition object.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfig"
}
},
"additionalProperties": false
},
{
"type": "string",
"pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
}
]
},
"pipelines.SchemaSpec": {
"anyOf": [
{
@ -4281,7 +4368,7 @@
"$ref": "#/$defs/string"
},
"destination_table": {
"description": "Optional. Destination table name. The pipeline fails If a table with that name already exists. If not set, the source table name is used.",
"description": "Optional. Destination table name. The pipeline fails if a table with that name already exists. If not set, the source table name is used.",
"$ref": "#/$defs/string"
},
"source_catalog": {
@ -4329,6 +4416,10 @@
"SCD_TYPE_1",
"SCD_TYPE_2"
]
},
"sequence_by": {
"description": "The column names specifying the logical order of events in the source data. Delta Live Tables uses this sequencing to handle change events that arrive out of order.",
"$ref": "#/$defs/slice/string"
}
},
"additionalProperties": false
@ -5246,6 +5337,20 @@
}
]
},
"resources.Dashboard": {
"anyOf": [
{
"type": "object",
"additionalProperties": {
"$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Dashboard"
}
},
{
"type": "string",
"pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
}
]
},
"resources.Job": {
"anyOf": [
{

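All of the `pattern` fields added to the schema above share the same variable-reference regex. A quick, hedged check of what it accepts; the sample strings are illustrative.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Variable-reference pattern as it appears in the generated bundle schema.
	pattern := regexp.MustCompile(`\$\{(var(\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\[[0-9]+\])*)+)\}`)

	for _, s := range []string{
		"${var.warehouse_id}", // plain variable reference: matches
		"${var.clusters[0]}",  // indexed reference: matches
		"${workspace.host}",   // not a variable reference: no match
	} {
		fmt.Printf("%-22s %v\n", s, pattern.MatchString(s))
	}
}
```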
View File

@ -0,0 +1,33 @@
bundle:
name: issue_1828
variables:
# One entry for each of the underlying YAML (or [dyn.Kind]) types.
# The test confirms we can convert to and from the typed configuration without losing information.
map:
default:
foo: bar
sequence:
default:
- foo
- bar
string:
default: foo
bool:
default: true
int:
default: 42
float:
default: 3.14
time:
default: 2021-01-01
nil:
default:

View File

@ -0,0 +1,48 @@
package config_tests
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestIssue1828(t *testing.T) {
b := load(t, "./issue_1828")
if assert.Contains(t, b.Config.Variables, "map") {
assert.Equal(t, map[string]any{
"foo": "bar",
}, b.Config.Variables["map"].Default)
}
if assert.Contains(t, b.Config.Variables, "sequence") {
assert.Equal(t, []any{
"foo",
"bar",
}, b.Config.Variables["sequence"].Default)
}
if assert.Contains(t, b.Config.Variables, "string") {
assert.Equal(t, "foo", b.Config.Variables["string"].Default)
}
if assert.Contains(t, b.Config.Variables, "bool") {
assert.Equal(t, true, b.Config.Variables["bool"].Default)
}
if assert.Contains(t, b.Config.Variables, "int") {
assert.Equal(t, 42, b.Config.Variables["int"].Default)
}
if assert.Contains(t, b.Config.Variables, "float") {
assert.Equal(t, 3.14, b.Config.Variables["float"].Default)
}
if assert.Contains(t, b.Config.Variables, "time") {
assert.Equal(t, "2021-01-01", b.Config.Variables["time"].Default)
}
if assert.Contains(t, b.Config.Variables, "nil") {
assert.Equal(t, nil, b.Config.Variables["nil"].Default)
}
}

View File

@ -27,5 +27,6 @@ func New() *cobra.Command {
cmd.AddCommand(newGenerateCommand())
cmd.AddCommand(newDebugCommand())
cmd.AddCommand(deployment.NewDeploymentCommand())
cmd.AddCommand(newOpenCommand())
return cmd
}

View File

@ -78,7 +78,7 @@ func newDeployCommand() *cobra.Command {
}
renderOpts := render.RenderOptions{RenderSummaryTable: false}
err := render.RenderTextOutput(cmd.OutOrStdout(), b, diags, renderOpts)
err := render.RenderDiagnostics(cmd.OutOrStdout(), b, diags, renderOpts)
if err != nil {
return fmt.Errorf("failed to render output: %w", err)
}

144
cmd/bundle/open.go Normal file
View File

@ -0,0 +1,144 @@
package bundle
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config/mutator"
"github.com/databricks/cli/bundle/deploy/terraform"
"github.com/databricks/cli/bundle/phases"
"github.com/databricks/cli/bundle/resources"
"github.com/databricks/cli/cmd/bundle/utils"
"github.com/databricks/cli/cmd/root"
"github.com/databricks/cli/libs/cmdio"
"github.com/spf13/cobra"
"golang.org/x/exp/maps"
"github.com/pkg/browser"
)
func promptOpenArgument(ctx context.Context, b *bundle.Bundle) (string, error) {
// Compute a map of "human-readable resource name" -> "resource key".
inv := make(map[string]string)
for k, ref := range resources.Completions(b) {
title := fmt.Sprintf("%s: %s", ref.Description.SingularTitle, ref.Resource.GetName())
inv[title] = k
}
key, err := cmdio.Select(ctx, inv, "Resource to open")
if err != nil {
return "", err
}
return key, nil
}
func resolveOpenArgument(ctx context.Context, b *bundle.Bundle, args []string) (string, error) {
// If no arguments are specified, prompt the user to select the resource to open.
if len(args) == 0 && cmdio.IsPromptSupported(ctx) {
return promptOpenArgument(ctx, b)
}
if len(args) < 1 {
return "", fmt.Errorf("expected a KEY of the resource to open")
}
return args[0], nil
}
func newOpenCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "open",
Short: "Open a resource in the browser",
Args: root.MaximumNArgs(1),
}
var forcePull bool
cmd.Flags().BoolVar(&forcePull, "force-pull", false, "Skip local cache and load the state from the remote workspace")
cmd.RunE = func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
b, diags := utils.ConfigureBundleWithVariables(cmd)
if err := diags.Error(); err != nil {
return diags.Error()
}
diags = bundle.Apply(ctx, b, phases.Initialize())
if err := diags.Error(); err != nil {
return err
}
arg, err := resolveOpenArgument(ctx, b, args)
if err != nil {
return err
}
cacheDir, err := terraform.Dir(ctx, b)
if err != nil {
return err
}
_, stateFileErr := os.Stat(filepath.Join(cacheDir, terraform.TerraformStateFileName))
_, configFileErr := os.Stat(filepath.Join(cacheDir, terraform.TerraformConfigFileName))
noCache := errors.Is(stateFileErr, os.ErrNotExist) || errors.Is(configFileErr, os.ErrNotExist)
if forcePull || noCache {
diags = bundle.Apply(ctx, b, bundle.Seq(
terraform.StatePull(),
terraform.Interpolate(),
terraform.Write(),
))
if err := diags.Error(); err != nil {
return err
}
}
diags = bundle.Apply(ctx, b, bundle.Seq(
terraform.Load(),
mutator.InitializeURLs(),
))
if err := diags.Error(); err != nil {
return err
}
// Locate resource to open.
ref, err := resources.Lookup(b, arg)
if err != nil {
return err
}
// Confirm that the resource has a URL.
url := ref.Resource.GetURL()
if url == "" {
return fmt.Errorf("resource does not have a URL associated with it (has it been deployed?)")
}
return browser.OpenURL(url)
}
cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
b, diags := root.MustConfigureBundle(cmd)
if err := diags.Error(); err != nil {
cobra.CompErrorln(err.Error())
return nil, cobra.ShellCompDirectiveError
}
// No completions if the command is not run in the context of a bundle.
if b == nil {
return nil, cobra.ShellCompDirectiveNoFileComp
}
if len(args) == 0 {
completions := resources.Completions(b)
return maps.Keys(completions), cobra.ShellCompDirectiveNoFileComp
} else {
return nil, cobra.ShellCompDirectiveNoFileComp
}
}
return cmd
}

View File

@ -1,20 +1,69 @@
package bundle
import (
"context"
"encoding/json"
"fmt"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/deploy/terraform"
"github.com/databricks/cli/bundle/phases"
"github.com/databricks/cli/bundle/resources"
"github.com/databricks/cli/bundle/run"
"github.com/databricks/cli/bundle/run/output"
"github.com/databricks/cli/cmd/bundle/utils"
"github.com/databricks/cli/cmd/root"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/flags"
"github.com/spf13/cobra"
"golang.org/x/exp/maps"
)
func promptRunArgument(ctx context.Context, b *bundle.Bundle) (string, error) {
// Compute a map of "human-readable resource name" -> "resource key".
inv := make(map[string]string)
for k, ref := range resources.Completions(b, run.IsRunnable) {
title := fmt.Sprintf("%s: %s", ref.Description.SingularTitle, ref.Resource.GetName())
inv[title] = k
}
key, err := cmdio.Select(ctx, inv, "Resource to run")
if err != nil {
return "", err
}
return key, nil
}
func resolveRunArgument(ctx context.Context, b *bundle.Bundle, args []string) (string, error) {
// If no arguments are specified, prompt the user to select something to run.
if len(args) == 0 && cmdio.IsPromptSupported(ctx) {
return promptRunArgument(ctx, b)
}
if len(args) < 1 {
return "", fmt.Errorf("expected a KEY of the resource to run")
}
return args[0], nil
}
func keyToRunner(b *bundle.Bundle, arg string) (run.Runner, error) {
// Locate the resource to run.
ref, err := resources.Lookup(b, arg, run.IsRunnable)
if err != nil {
return nil, err
}
// Convert the resource to a runnable resource.
runner, err := run.ToRunner(b, ref)
if err != nil {
return nil, err
}
return runner, nil
}
func newRunCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "run [flags] KEY",
@ -60,22 +109,9 @@ task or a Python wheel task, the second example applies.
return err
}
// If no arguments are specified, prompt the user to select something to run.
if len(args) == 0 && cmdio.IsPromptSupported(ctx) {
// Invert completions from KEY -> NAME, to NAME -> KEY.
inv := make(map[string]string)
for k, v := range run.ResourceCompletionMap(b) {
inv[v] = k
}
id, err := cmdio.Select(ctx, inv, "Resource to run")
if err != nil {
return err
}
args = append(args, id)
}
if len(args) < 1 {
return fmt.Errorf("expected a KEY of the resource to run")
arg, err := resolveRunArgument(ctx, b, args)
if err != nil {
return err
}
diags = bundle.Apply(ctx, b, bundle.Seq(
@ -88,7 +124,7 @@ task or a Python wheel task, the second example applies.
return err
}
runner, err := run.Find(b, args[0])
runner, err := keyToRunner(b, arg)
if err != nil {
return err
}
@ -100,19 +136,16 @@ task or a Python wheel task, the second example applies.
}
runOptions.NoWait = noWait
var output output.RunOutput
if restart {
s := cmdio.Spinner(ctx)
s <- "Cancelling all runs"
err := runner.Cancel(ctx)
close(s)
if err != nil {
return err
}
output, err = runner.Restart(ctx, &runOptions)
} else {
output, err = runner.Run(ctx, &runOptions)
}
output, err := runner.Run(ctx, &runOptions)
if err != nil {
return err
}
if output != nil {
switch root.OutputType(cmd) {
case flags.OutputText:
@ -148,10 +181,11 @@ task or a Python wheel task, the second example applies.
}
if len(args) == 0 {
return run.ResourceCompletions(b), cobra.ShellCompDirectiveNoFileComp
completions := resources.Completions(b, run.IsRunnable)
return maps.Keys(completions), cobra.ShellCompDirectiveNoFileComp
} else {
// If we know the resource to run, we can complete additional positional arguments.
runner, err := run.Find(b, args[0])
runner, err := keyToRunner(b, args[0])
if err != nil {
return nil, cobra.ShellCompDirectiveError
}

View File

@ -8,8 +8,10 @@ import (
"path/filepath"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config/mutator"
"github.com/databricks/cli/bundle/deploy/terraform"
"github.com/databricks/cli/bundle/phases"
"github.com/databricks/cli/bundle/render"
"github.com/databricks/cli/cmd/bundle/utils"
"github.com/databricks/cli/cmd/root"
"github.com/databricks/cli/libs/flags"
@ -19,11 +21,8 @@ import (
func newSummaryCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "summary",
Short: "Describe the bundle resources and their deployment states",
Short: "Summarize resources deployed by this bundle",
Args: root.NoArgs,
// This command is currently intended for the Databricks VSCode extension only
Hidden: true,
}
var forcePull bool
@ -60,14 +59,15 @@ func newSummaryCommand() *cobra.Command {
}
}
diags = bundle.Apply(ctx, b, terraform.Load())
diags = bundle.Apply(ctx, b,
bundle.Seq(terraform.Load(), mutator.InitializeURLs()))
if err := diags.Error(); err != nil {
return err
}
switch root.OutputType(cmd) {
case flags.OutputText:
return fmt.Errorf("%w, only json output is supported", errors.ErrUnsupported)
return render.RenderSummary(ctx, cmd.OutOrStdout(), b)
case flags.OutputJSON:
buf, err := json.MarshalIndent(b.Config, "", " ")
if err != nil {

View File

@ -1,7 +1,9 @@
package bundle
import (
"context"
"fmt"
"io"
"time"
"github.com/databricks/cli/bundle"
@ -9,6 +11,7 @@ import (
"github.com/databricks/cli/bundle/phases"
"github.com/databricks/cli/cmd/bundle/utils"
"github.com/databricks/cli/cmd/root"
"github.com/databricks/cli/libs/flags"
"github.com/databricks/cli/libs/log"
"github.com/databricks/cli/libs/sync"
"github.com/spf13/cobra"
@ -18,6 +21,7 @@ type syncFlags struct {
interval time.Duration
full bool
watch bool
output flags.Output
}
func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, b *bundle.Bundle) (*sync.SyncOptions, error) {
@ -26,6 +30,21 @@ func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, b *bundle.Bundle)
return nil, fmt.Errorf("cannot get sync options: %w", err)
}
if f.output != "" {
var outputFunc func(context.Context, <-chan sync.Event, io.Writer)
switch f.output {
case flags.OutputText:
outputFunc = sync.TextOutput
case flags.OutputJSON:
outputFunc = sync.JsonOutput
}
if outputFunc != nil {
opts.OutputHandler = func(ctx context.Context, c <-chan sync.Event) {
outputFunc(ctx, c, cmd.OutOrStdout())
}
}
}
opts.Full = f.full
opts.PollInterval = f.interval
return opts, nil
@ -42,6 +61,7 @@ func newSyncCommand() *cobra.Command {
cmd.Flags().DurationVar(&f.interval, "interval", 1*time.Second, "file system polling interval (for --watch)")
cmd.Flags().BoolVar(&f.full, "full", false, "perform full synchronization (default is incremental)")
cmd.Flags().BoolVar(&f.watch, "watch", false, "watch local file system for changes")
cmd.Flags().Var(&f.output, "output", "type of the output format")
cmd.RunE = func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
@ -65,6 +85,7 @@ func newSyncCommand() *cobra.Command {
if err != nil {
return err
}
defer s.Close()
log.Infof(ctx, "Remote file sync location: %v", opts.RemotePath)
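The new --output flag wires the text and JSON writers from libs/sync into the bundle sync command. Code embedding the sync engine can install a handler the same way; a hedged sketch with an illustrative helper name:

```go
package example

import (
	"context"
	"os"

	"github.com/databricks/cli/libs/sync"
)

// withStderrOutput mirrors sync events to stderr as text, reusing the same
// TextOutput function that the --output text flag selects above.
func withStderrOutput(opts *sync.SyncOptions) {
	opts.OutputHandler = func(ctx context.Context, events <-chan sync.Event) {
		sync.TextOutput(ctx, events, os.Stderr)
	}
}
```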

View File

@ -54,7 +54,7 @@ func newValidateCommand() *cobra.Command {
switch root.OutputType(cmd) {
case flags.OutputText:
renderOpts := render.RenderOptions{RenderSummaryTable: true}
err := render.RenderTextOutput(cmd.OutOrStdout(), b, diags, renderOpts)
err := render.RenderDiagnostics(cmd.OutOrStdout(), b, diags, renderOpts)
if err != nil {
return fmt.Errorf("failed to render output: %w", err)
}

View File

@ -28,9 +28,6 @@ func New() *cobra.Command {
Annotations: map[string]string{
"package": "apps",
},
// This service is being previewed; hide from help output.
Hidden: true,
}
// Add methods

View File

@ -0,0 +1,220 @@
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
package disable_legacy_dbfs
import (
"fmt"
"github.com/databricks/cli/cmd/root"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/flags"
"github.com/databricks/databricks-sdk-go/service/settings"
"github.com/spf13/cobra"
)
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var cmdOverrides []func(*cobra.Command)
func New() *cobra.Command {
cmd := &cobra.Command{
Use: "disable-legacy-dbfs",
Short: `When this setting is on, access to DBFS root and DBFS mounts is disallowed (as well as creation of new mounts).`,
Long: `When this setting is on, access to DBFS root and DBFS mounts is disallowed (as
well as creation of new mounts). When the setting is off, all DBFS
functionality is enabled`,
// This service is being previewed; hide from help output.
Hidden: true,
}
// Add methods
cmd.AddCommand(newDelete())
cmd.AddCommand(newGet())
cmd.AddCommand(newUpdate())
// Apply optional overrides to this command.
for _, fn := range cmdOverrides {
fn(cmd)
}
return cmd
}
// start delete command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var deleteOverrides []func(
*cobra.Command,
*settings.DeleteDisableLegacyDbfsRequest,
)
func newDelete() *cobra.Command {
cmd := &cobra.Command{}
var deleteReq settings.DeleteDisableLegacyDbfsRequest
// TODO: short flags
cmd.Flags().StringVar(&deleteReq.Etag, "etag", deleteReq.Etag, `etag used for versioning.`)
cmd.Use = "delete"
cmd.Short = `Delete the disable legacy DBFS setting.`
cmd.Long = `Delete the disable legacy DBFS setting.
Deletes the disable legacy DBFS setting for a workspace, reverting back to the
default.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := root.ExactArgs(0)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.Settings.DisableLegacyDbfs().Delete(ctx, deleteReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range deleteOverrides {
fn(cmd, &deleteReq)
}
return cmd
}
// start get command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var getOverrides []func(
*cobra.Command,
*settings.GetDisableLegacyDbfsRequest,
)
func newGet() *cobra.Command {
cmd := &cobra.Command{}
var getReq settings.GetDisableLegacyDbfsRequest
// TODO: short flags
cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`)
cmd.Use = "get"
cmd.Short = `Get the disable legacy DBFS setting.`
cmd.Long = `Get the disable legacy DBFS setting.
Gets the disable legacy DBFS setting.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := root.ExactArgs(0)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.Settings.DisableLegacyDbfs().Get(ctx, getReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range getOverrides {
fn(cmd, &getReq)
}
return cmd
}
// start update command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var updateOverrides []func(
*cobra.Command,
*settings.UpdateDisableLegacyDbfsRequest,
)
func newUpdate() *cobra.Command {
cmd := &cobra.Command{}
var updateReq settings.UpdateDisableLegacyDbfsRequest
var updateJson flags.JsonFlag
// TODO: short flags
cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
cmd.Use = "update"
cmd.Short = `Update the disable legacy DBFS setting.`
cmd.Long = `Update the disable legacy DBFS setting.
Updates the disable legacy DBFS setting for the workspace.`
cmd.Annotations = make(map[string]string)
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
if cmd.Flags().Changed("json") {
diags := updateJson.Unmarshal(&updateReq)
if diags.HasError() {
return diags.Error()
}
if len(diags) > 0 {
err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
if err != nil {
return err
}
}
} else {
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := w.Settings.DisableLegacyDbfs().Update(ctx, updateReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range updateOverrides {
fn(cmd, &updateReq)
}
return cmd
}
// end service DisableLegacyDbfs

View File

@ -1557,6 +1557,7 @@ func newSubmit() *cobra.Command {
cmd.Flags().Var(&submitJson, "json", `either inline JSON string or @path/to/file.json with request body`)
// TODO: array: access_control_list
cmd.Flags().StringVar(&submitReq.BudgetPolicyId, "budget-policy-id", submitReq.BudgetPolicyId, `The user specified id of the budget policy to use for this one-time run.`)
// TODO: complex arg: email_notifications
// TODO: array: environments
// TODO: complex arg: git_source

View File

@ -9,6 +9,7 @@ import (
compliance_security_profile "github.com/databricks/cli/cmd/workspace/compliance-security-profile"
default_namespace "github.com/databricks/cli/cmd/workspace/default-namespace"
disable_legacy_access "github.com/databricks/cli/cmd/workspace/disable-legacy-access"
disable_legacy_dbfs "github.com/databricks/cli/cmd/workspace/disable-legacy-dbfs"
enhanced_security_monitoring "github.com/databricks/cli/cmd/workspace/enhanced-security-monitoring"
restrict_workspace_admins "github.com/databricks/cli/cmd/workspace/restrict-workspace-admins"
)
@ -33,6 +34,7 @@ func New() *cobra.Command {
cmd.AddCommand(compliance_security_profile.New())
cmd.AddCommand(default_namespace.New())
cmd.AddCommand(disable_legacy_access.New())
cmd.AddCommand(disable_legacy_dbfs.New())
cmd.AddCommand(enhanced_security_monitoring.New())
cmd.AddCommand(restrict_workspace_admins.New())

4
go.mod
View File

@ -7,8 +7,8 @@ toolchain go1.22.7
require (
github.com/Masterminds/semver/v3 v3.3.0 // MIT
github.com/briandowns/spinner v1.23.1 // Apache 2.0
github.com/databricks/databricks-sdk-go v0.48.0 // Apache 2.0
github.com/fatih/color v1.17.0 // MIT
github.com/databricks/databricks-sdk-go v0.49.0 // Apache 2.0
github.com/fatih/color v1.18.0 // MIT
github.com/ghodss/yaml v1.0.0 // MIT + NOTICE
github.com/google/uuid v1.6.0 // BSD-3-Clause
github.com/hashicorp/go-version v1.7.0 // MPL 2.0

8
go.sum generated
View File

@ -32,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/databricks/databricks-sdk-go v0.48.0 h1:46KtsnRo+FGhC3izUXbpL0PXBNomvsdignYDhJZlm9s=
github.com/databricks/databricks-sdk-go v0.48.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU=
github.com/databricks/databricks-sdk-go v0.49.0 h1:VBTeZZMLIuBSM4kxOCfUcW9z4FUQZY2QeNRD5qm9FUQ=
github.com/databricks/databricks-sdk-go v0.49.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -44,8 +44,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=
github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=

View File

@ -2,11 +2,14 @@ package acc
import (
"context"
"fmt"
"os"
"testing"
"github.com/databricks/databricks-sdk-go"
"github.com/databricks/databricks-sdk-go/apierr"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/databricks/databricks-sdk-go/service/workspace"
"github.com/stretchr/testify/require"
)
@ -94,3 +97,30 @@ func (t *WorkspaceT) RunPython(code string) (string, error) {
require.True(t, ok, "unexpected type %T", results.Data)
return output, nil
}
func (t *WorkspaceT) TemporaryWorkspaceDir(name ...string) string {
ctx := context.Background()
me, err := t.W.CurrentUser.Me(ctx)
require.NoError(t, err)
basePath := fmt.Sprintf("/Users/%s/%s", me.UserName, RandomName(name...))
t.Logf("Creating %s", basePath)
err = t.W.Workspace.MkdirsByPath(ctx, basePath)
require.NoError(t, err)
// Remove test directory on test completion.
t.Cleanup(func() {
t.Logf("Removing %s", basePath)
err := t.W.Workspace.Delete(ctx, workspace.Delete{
Path: basePath,
Recursive: true,
})
if err == nil || apierr.IsMissing(err) {
return
}
t.Logf("Unable to remove temporary workspace directory %s: %#v", basePath, err)
})
return basePath
}
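A hedged usage sketch for the new helper; it assumes the acc.WorkspaceTest fixture used elsewhere in this repository, and the test name is made up.

```go
package acc_test

import (
	"testing"

	"github.com/databricks/cli/internal/acc"
)

// TestScratchDir acquires a workspace-backed fixture and a scratch directory
// that is removed automatically when the test finishes.
func TestScratchDir(t *testing.T) {
	_, wt := acc.WorkspaceTest(t)

	dir := wt.TemporaryWorkspaceDir("scratch")
	t.Log(dir) // e.g. /Users/me@example.com/scratch-<random suffix>
}
```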

View File

@ -0,0 +1,13 @@
# Bugbash
The script in this directory can be used to conveniently exec into a shell
where a CLI build for a specific branch is made available.
## Usage
This script warns if you do NOT have at least Bash 5 installed;
it still works on earlier versions, but without command completion.
```shell
bash <(curl -fsSL https://raw.githubusercontent.com/databricks/cli/main/internal/bugbash/exec.sh) my-branch
```

139
internal/bugbash/exec.sh Executable file
View File

@ -0,0 +1,139 @@
#!/usr/bin/env bash
set -euo pipefail
# Set the GitHub repository for the Databricks CLI.
export GH_REPO="databricks/cli"
# Synthesize the directory name for the snapshot build.
function cli_snapshot_directory() {
	dir="cli"

	# Append OS
	case "$(uname -s)" in
	Linux)
		dir="${dir}_linux"
		;;
	Darwin)
		dir="${dir}_darwin"
		;;
	*)
		# Write the error to stderr and abort; echoing it to stdout would
		# corrupt the directory name captured by the caller.
		echo "Unknown operating system: $(uname -s)" >&2
		exit 1
		;;
	esac

	# Append architecture
	case "$(uname -m)" in
	x86_64)
		dir="${dir}_amd64_v1"
		;;
	i386|i686)
		dir="${dir}_386"
		;;
	arm64|aarch64)
		dir="${dir}_arm64"
		;;
	armv7l|armv8l)
		dir="${dir}_arm_6"
		;;
	*)
		echo "Unknown architecture: $(uname -m)" >&2
		exit 1
		;;
	esac

	echo "$dir"
}
# Default to the main branch if no branch is specified.
# Use ${1:-} expansion so an absent argument doesn't trip "set -u".
BRANCH=${1:-main}
# Check if the "gh" command is available.
if ! command -v gh &> /dev/null; then
	echo "The GitHub CLI (gh) is required to download the snapshot build."
	echo "Install and configure it with:"
	echo ""
	echo "  brew install gh"
	echo "  gh auth login"
	echo ""
	exit 1
fi
echo "Looking for a snapshot build of the Databricks CLI on branch $BRANCH..."
# Find last successful build on $BRANCH.
last_successful_run_id=$(
	gh run list -b "$BRANCH" -w release-snapshot --json 'databaseId,conclusion' |
		jq 'limit(1; .[] | select(.conclusion == "success")) | .databaseId'
)

if [ -z "$last_successful_run_id" ]; then
	echo "Unable to find last successful build of the release-snapshot workflow for branch $BRANCH."
	exit 1
fi
# Determine artifact name with the right binaries for this runner.
case "$(uname -s)" in
Linux)
artifact="cli_linux_snapshot"
;;
Darwin)
artifact="cli_darwin_snapshot"
;;
esac
# Create a temporary directory to download the artifact.
dir=$(mktemp -d)
# Download the artifact.
echo "Downloading the snapshot build..."
gh run download "$last_successful_run_id" -n "$artifact" -D "$dir/.bin"
dir="$dir/.bin/$(cli_snapshot_directory)"
if [ ! -d "$dir" ]; then
	echo "Directory does not exist: $dir"
	exit 1
fi
# Make CLI available on $PATH.
chmod +x "$dir/databricks"
export PATH="$dir:$PATH"
# Set the prompt to indicate the bugbash environment and exec.
export PS1="(bugbash $BRANCH) \[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ "
# Display completion instructions.
echo ""
echo "=================================================================="
if [[ ${BASH_VERSINFO[0]} -lt 5 ]]; then
	echo -en "\033[31m"
	echo "You have Bash version < 5 installed... completion won't work."
	echo -en "\033[0m"
	echo ""
	echo "Install it with:"
	echo ""
	echo "  brew install bash bash-completion"
	echo ""
	echo "=================================================================="
fi
echo ""
echo "To load completions in your current shell session:"
echo ""
echo " source /opt/homebrew/etc/profile.d/bash_completion.sh"
echo " source <(databricks completion bash)"
echo ""
echo "=================================================================="
echo ""
# Exec into a new shell.
# Note: don't use zsh because on macOS it _always_ overwrites PS1.
exec /usr/bin/env bash

View File

@ -0,0 +1,73 @@
package build
import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"os/exec"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"golang.org/x/mod/modfile"
)
// This test ensures that the OpenAPI SHA the CLI is being generated from matches
// the OpenAPI SHA of the Go SDK version used in the CLI. We should always upgrade
// the Go SDK version before generating the CLI because downstream generated assets
// like the bundle schema depend on the Go SDK itself.
func TestConsistentDatabricksSdkVersion(t *testing.T) {
	// Read the go.mod file
	b, err := os.ReadFile("../../go.mod")
	require.NoError(t, err)

	// Parse the go.mod file to get the databricks-sdk version
	modFile, err := modfile.Parse("../../go.mod", b, nil)
	require.NoError(t, err)

	modulePath := "github.com/databricks/databricks-sdk-go"
	var version string
	for _, r := range modFile.Require {
		if r.Mod.Path == modulePath {
			version = r.Mod.Version
		}
	}
	require.NotEmpty(t, version)
	// Full path of the package. For example: github.com/databricks/databricks-sdk-go@v0.47.1-0.20241002195128-6cecc224cbf7
	fullPath := fmt.Sprintf("%s@%s", modulePath, version)

	type goListResponse struct {
		Origin struct {
			Hash string
		}
	}

	// Use the go CLI to query for the git hash corresponding to the databricks-sdk-go version
	cmd := exec.Command("go", "list", "-m", "-json", "-mod=readonly", fullPath)
	out, err := cmd.Output()
	require.NoError(t, err)
	parsedOutput := new(goListResponse)
	err = json.Unmarshal(out, parsedOutput)
	require.NoError(t, err)
	hash := parsedOutput.Origin.Hash
	require.NotEmpty(t, hash)
	// Read the OpenAPI SHA from the Go SDK.
	url := fmt.Sprintf("https://raw.githubusercontent.com/databricks/databricks-sdk-go/%s/.codegen/_openapi_sha", hash)
	resp, err := http.Get(url)
	require.NoError(t, err)
	defer resp.Body.Close()
	require.Equal(t, http.StatusOK, resp.StatusCode)

	sdkSha, err := io.ReadAll(resp.Body)
	require.NoError(t, err)

	cliSha, err := os.ReadFile("../../.codegen/_openapi_sha")
	require.NoError(t, err)

	assert.Equal(t, strings.TrimSpace(string(cliSha)), strings.TrimSpace(string(sdkSha)), "please update the SDK version before generating the CLI")
}
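
For reference, `go list -m -json` reports a module's VCS origin in a payload shaped roughly like the one below; this self-contained sketch (illustrative values, not part of the commit) decodes the same `Origin.Hash` field the test relies on:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Illustrative `go list -m -json` output for a module resolved from git;
// the hash value here is a placeholder, not a real commit.
const sample = `{
  "Path": "github.com/databricks/databricks-sdk-go",
  "Version": "v0.49.0",
  "Origin": {
    "VCS": "git",
    "URL": "https://github.com/databricks/databricks-sdk-go",
    "Hash": "0000000000000000000000000000000000000000"
  }
}`

func main() {
	var parsed struct {
		Origin struct {
			Hash string
		}
	}
	if err := json.Unmarshal([]byte(sample), &parsed); err != nil {
		panic(err)
	}
	fmt.Println(parsed.Origin.Hash)
}
```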

View File

@ -0,0 +1,12 @@
{
  "properties": {
    "unique_id": {
      "type": "string",
      "description": "Unique ID for job name"
    },
    "warehouse_id": {
      "type": "string",
      "description": "The SQL warehouse ID to use for the dashboard"
    }
  }
}

View File

@ -0,0 +1,34 @@
{
  "pages": [
    {
      "displayName": "New Page",
      "layout": [
        {
          "position": {
            "height": 2,
            "width": 6,
            "x": 0,
            "y": 0
          },
          "widget": {
            "name": "82eb9107",
            "textbox_spec": "# I'm a title"
          }
        },
        {
          "position": {
            "height": 2,
            "width": 6,
            "x": 0,
            "y": 2
          },
          "widget": {
            "name": "ffa6de4f",
            "textbox_spec": "Text"
          }
        }
      ],
      "name": "fdd21a3c"
    }
  ]
}

View File

@ -0,0 +1,12 @@
bundle:
name: dashboards
workspace:
root_path: "~/.bundle/{{.unique_id}}"
resources:
dashboards:
file_reference:
display_name: test-dashboard-{{.unique_id}}
file_path: ./dashboard.lvdash.json
warehouse_id: {{.warehouse_id}}

Some files were not shown because too many files have changed in this diff.