Merge remote-tracking branch 'databricks/main' into cp-better-errors

Lennart Kats 2024-04-25 09:33:08 +02:00
commit 7cdfc8d8ff
No known key found for this signature in database
GPG Key ID: 1EB8B57673197023
124 changed files with 3306 additions and 4744 deletions

.github/workflows/publish-winget.yml (new file)

@ -0,0 +1,16 @@
name: publish-winget

on:
  workflow_dispatch:

jobs:
  publish-to-winget-pkgs:
    runs-on: windows-latest
    environment: release
    steps:
      - uses: vedantmgoyal2009/winget-releaser@93fd8b606a1672ec3e5c6c3bb19426be68d1a8b0 # https://github.com/vedantmgoyal2009/winget-releaser/releases/tag/v2
        with:
          identifier: Databricks.DatabricksCLI
          installers-regex: 'windows_.*-signed\.zip$' # Only signed Windows releases
          token: ${{ secrets.ENG_DEV_ECOSYSTEM_BOT_TOKEN }}
          fork-user: eng-dev-ecosystem-bot


@ -84,7 +84,7 @@ jobs:
        with:
          github-token: ${{ secrets.DECO_GITHUB_TOKEN }}
          script: |
-           let artifacts = JSON.parse('${{ needs.goreleaser.outputs.artifacts }}')
+           let artifacts = ${{ needs.goreleaser.outputs.artifacts }}
            artifacts = artifacts.filter(a => a.type == "Archive")
            artifacts = new Map(
              artifacts.map(a => [
@ -130,15 +130,3 @@ jobs:
              version: "${{ env.VERSION }}",
            }
          });
-  publish-to-winget-pkgs:
-    needs: goreleaser
-    runs-on: windows-latest
-    environment: release
-    steps:
-      - uses: vedantmgoyal2009/winget-releaser@93fd8b606a1672ec3e5c6c3bb19426be68d1a8b0 # https://github.com/vedantmgoyal2009/winget-releaser/releases/tag/v2
-        with:
-          identifier: Databricks.DatabricksCLI
-          installers-regex: 'windows_.*\.zip$' # Only windows releases
-          token: ${{ secrets.ENG_DEV_ECOSYSTEM_BOT_TOKEN }}
-          fork-user: eng-dev-ecosystem-bot


@ -1,5 +1,49 @@
# Version changelog

## 0.218.0
This release marks the general availability of Databricks Asset Bundles.

CLI:
* Publish Docker images ([#1353](https://github.com/databricks/cli/pull/1353)).
* Add support for multi-arch Docker images ([#1362](https://github.com/databricks/cli/pull/1362)).
* Do not prefill https:// in prompt for Databricks Host ([#1364](https://github.com/databricks/cli/pull/1364)).
* Add better documentation for the `auth login` command ([#1366](https://github.com/databricks/cli/pull/1366)).
* Add URLs for authentication documentation to the auth command help ([#1365](https://github.com/databricks/cli/pull/1365)).

Bundles:
* Fix compute override for foreach tasks ([#1357](https://github.com/databricks/cli/pull/1357)).
* Transform artifact files source patterns in build not upload stage ([#1359](https://github.com/databricks/cli/pull/1359)).
* Convert between integer and float in normalization ([#1371](https://github.com/databricks/cli/pull/1371)).
* Disable locking for development mode ([#1302](https://github.com/databricks/cli/pull/1302)).
* Resolve variable references inside variable lookup fields ([#1368](https://github.com/databricks/cli/pull/1368)).
* Added validate mutator to surface additional bundle warnings ([#1352](https://github.com/databricks/cli/pull/1352)).
* Upgrade terraform-provider-databricks to 1.40.0 ([#1376](https://github.com/databricks/cli/pull/1376)).
* Print host in `bundle validate` when passed via profile or environment variables ([#1378](https://github.com/databricks/cli/pull/1378)).
* Cleanup remote file path on bundle destroy ([#1374](https://github.com/databricks/cli/pull/1374)).
* Add docs URL for `run_as` in error message ([#1381](https://github.com/databricks/cli/pull/1381)).
* Enable job queueing by default ([#1385](https://github.com/databricks/cli/pull/1385)).
* Added support for job environments ([#1379](https://github.com/databricks/cli/pull/1379)).
* Processing and completion of positional args to bundle run ([#1120](https://github.com/databricks/cli/pull/1120)).
* Add legacy option for `run_as` ([#1384](https://github.com/databricks/cli/pull/1384)).

API Changes:
* Changed `databricks lakehouse-monitors cancel-refresh` command with new required argument order.
* Changed `databricks lakehouse-monitors create` command with new required argument order.
* Changed `databricks lakehouse-monitors delete` command with new required argument order.
* Changed `databricks lakehouse-monitors get` command with new required argument order.
* Changed `databricks lakehouse-monitors get-refresh` command with new required argument order.
* Changed `databricks lakehouse-monitors list-refreshes` command with new required argument order.
* Changed `databricks lakehouse-monitors run-refresh` command with new required argument order.
* Changed `databricks lakehouse-monitors update` command with new required argument order.
* Changed `databricks account workspace-assignment update` command to return response.

OpenAPI commit 94684175b8bd65f8701f89729351f8069e8309c9 (2024-04-11)

Dependency updates:
* Bump github.com/databricks/databricks-sdk-go from 0.37.0 to 0.38.0 ([#1361](https://github.com/databricks/cli/pull/1361)).
* Bump golang.org/x/net from 0.22.0 to 0.23.0 ([#1380](https://github.com/databricks/cli/pull/1380)).

## 0.217.1

CLI:


@ -1,6 +1,7 @@
FROM alpine:3.19 as builder

RUN ["apk", "add", "jq"]
RUN ["apk", "add", "bash"]

WORKDIR /build

NOTICE

@ -36,6 +36,10 @@ hashicorp/terraform-json - https://github.com/hashicorp/terraform-json
Copyright 2019 HashiCorp, Inc.
License - https://github.com/hashicorp/terraform-json/blob/main/LICENSE

hashicorp/terraform - https://github.com/hashicorp/terraform
Copyright 2014 HashiCorp, Inc.
License - https://github.com/hashicorp/terraform/blob/v1.5.5/LICENSE

---

This software contains code from the following open source projects, licensed under the BSD (2-clause) license:


@ -12,7 +12,6 @@ import (
"github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/artifacts/whl" "github.com/databricks/cli/bundle/artifacts/whl"
"github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/libraries"
"github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/filer"
@ -117,8 +116,6 @@ func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnost
} }
func uploadArtifact(ctx context.Context, b *bundle.Bundle, a *config.Artifact, uploadPath string, client filer.Filer) error { func uploadArtifact(ctx context.Context, b *bundle.Bundle, a *config.Artifact, uploadPath string, client filer.Filer) error {
filesToLibraries := libraries.MapFilesToTaskLibraries(ctx, b)
for i := range a.Files { for i := range a.Files {
f := &a.Files[i] f := &a.Files[i]
@ -133,24 +130,32 @@ func uploadArtifact(ctx context.Context, b *bundle.Bundle, a *config.Artifact, u
log.Infof(ctx, "Upload succeeded") log.Infof(ctx, "Upload succeeded")
f.RemotePath = path.Join(uploadPath, filepath.Base(f.Source)) f.RemotePath = path.Join(uploadPath, filepath.Base(f.Source))
// Lookup all tasks that reference this file. // TODO: confirm if we still need to update the remote path to start with /Workspace
libs, ok := filesToLibraries[f.Source] wsfsBase := "/Workspace"
if !ok { remotePath := path.Join(wsfsBase, f.RemotePath)
log.Debugf(ctx, "No tasks reference %s", f.Source)
continue
}
// Update all tasks that reference this file. for _, job := range b.Config.Resources.Jobs {
for _, lib := range libs { for i := range job.Tasks {
wsfsBase := "/Workspace" task := &job.Tasks[i]
remotePath := path.Join(wsfsBase, f.RemotePath) for j := range task.Libraries {
if lib.Whl != "" { lib := &task.Libraries[j]
lib.Whl = remotePath if lib.Whl != "" && isArtifactMatchLibrary(f, lib.Whl, b) {
continue lib.Whl = remotePath
}
if lib.Jar != "" && isArtifactMatchLibrary(f, lib.Jar, b) {
lib.Jar = remotePath
}
}
} }
if lib.Jar != "" {
lib.Jar = remotePath for i := range job.Environments {
continue env := &job.Environments[i]
for j := range env.Spec.Dependencies {
lib := env.Spec.Dependencies[j]
if isArtifactMatchLibrary(f, lib, b) {
env.Spec.Dependencies[j] = remotePath
}
}
} }
} }
} }
@ -158,6 +163,26 @@ func uploadArtifact(ctx context.Context, b *bundle.Bundle, a *config.Artifact, u
  return nil
}
func isArtifactMatchLibrary(f *config.ArtifactFile, libPath string, b *bundle.Bundle) bool {
if !filepath.IsAbs(libPath) {
libPath = filepath.Join(b.RootPath, libPath)
}
// libPath can be a glob pattern, so do the match first
matches, err := filepath.Glob(libPath)
if err != nil {
return false
}
for _, m := range matches {
if m == f.Source {
return true
}
}
return false
}
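
A standalone sketch of the matching idea used by isArtifactMatchLibrary above: resolve a relative (possibly glob) library path against the bundle root, then check whether any match is the artifact's source file. The temp-directory layout below is invented for illustration and only the Go standard library is used; this is not the CLI's own code.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Hypothetical bundle root with one built wheel under whl/.
	root, _ := os.MkdirTemp("", "bundle")
	defer os.RemoveAll(root)
	source := filepath.Join(root, "whl", "source.whl")
	os.MkdirAll(filepath.Dir(source), 0o755)
	os.WriteFile(source, []byte("stub"), 0o644)

	// A task library entry, relative to the bundle root and using a glob.
	libPath := filepath.Join("whl", "*.whl")
	if !filepath.IsAbs(libPath) {
		libPath = filepath.Join(root, libPath)
	}

	// Glob first, then compare each match against the artifact source.
	matches, err := filepath.Glob(libPath)
	if err != nil {
		return
	}
	for _, m := range matches {
		fmt.Println(m == source) // true: this library entry refers to the artifact file
	}
}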
// Function to upload artifact file to Workspace
func uploadArtifactFile(ctx context.Context, file string, client filer.Filer) error {
  raw, err := os.ReadFile(file)


@ -0,0 +1,91 @@
package artifacts
import (
"context"
"path/filepath"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
mockfiler "github.com/databricks/cli/internal/mocks/libs/filer"
"github.com/databricks/cli/internal/testutil"
"github.com/databricks/cli/libs/filer"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestArtifactUpload(t *testing.T) {
tmpDir := t.TempDir()
whlFolder := filepath.Join(tmpDir, "whl")
testutil.Touch(t, whlFolder, "source.whl")
whlLocalPath := filepath.Join(whlFolder, "source.whl")
b := &bundle.Bundle{
RootPath: tmpDir,
Config: config.Root{
Workspace: config.Workspace{
ArtifactPath: "/foo/bar/artifacts",
},
Artifacts: config.Artifacts{
"whl": {
Type: config.ArtifactPythonWheel,
Files: []config.ArtifactFile{
{Source: whlLocalPath},
},
},
},
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job": {
JobSettings: &jobs.JobSettings{
Tasks: []jobs.Task{
{
Libraries: []compute.Library{
{
Whl: filepath.Join("whl", "*.whl"),
},
{
Whl: "/Workspace/Users/foo@bar.com/mywheel.whl",
},
},
},
},
Environments: []jobs.JobEnvironment{
{
Spec: &compute.Environment{
Dependencies: []string{
filepath.Join("whl", "source.whl"),
"/Workspace/Users/foo@bar.com/mywheel.whl",
},
},
},
},
},
},
},
},
},
}
artifact := b.Config.Artifacts["whl"]
mockFiler := mockfiler.NewMockFiler(t)
mockFiler.EXPECT().Write(
mock.Anything,
filepath.Join("source.whl"),
mock.AnythingOfType("*bytes.Reader"),
filer.OverwriteIfExists,
filer.CreateParentDirectories,
).Return(nil)
err := uploadArtifact(context.Background(), b, artifact, "/foo/bar/artifacts", mockFiler)
require.NoError(t, err)
// Test that libraries path is updated
require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[0].Whl)
require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[1].Whl)
require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[0])
require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1])
}


@ -30,24 +30,18 @@ func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnost
  tasks := libraries.FindAllWheelTasksWithLocalLibraries(b)
  for _, task := range tasks {
    for _, lib := range task.Libraries {
-     matches, err := filepath.Glob(filepath.Join(b.RootPath, lib.Whl))
-     // File referenced from libraries section does not exists, skipping
-     if err != nil {
-       continue
-     }
-     for _, match := range matches {
-       name := filepath.Base(match)
-       if b.Config.Artifacts == nil {
-         b.Config.Artifacts = make(map[string]*config.Artifact)
-       }
-       log.Debugf(ctx, "Adding an artifact block for %s", match)
-       b.Config.Artifacts[name] = &config.Artifact{
-         Files: []config.ArtifactFile{
-           {Source: match},
-         },
-         Type: config.ArtifactPythonWheel,
-       }
-     }
-   }
- }
+     matchAndAdd(ctx, lib.Whl, b)
+   }
+ }
+
+ envs := libraries.FindAllEnvironments(b)
+ for _, jobEnvs := range envs {
+   for _, env := range jobEnvs {
+     if env.Spec != nil {
+       for _, dep := range env.Spec.Dependencies {
+         if libraries.IsEnvironmentDependencyLocal(dep) {
+           matchAndAdd(ctx, dep, b)
+         }
+       }
+     }
+   }
+ }
@ -55,3 +49,26 @@ func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnost
  return nil
}
func matchAndAdd(ctx context.Context, lib string, b *bundle.Bundle) {
matches, err := filepath.Glob(filepath.Join(b.RootPath, lib))
// File referenced from libraries section does not exist, skipping
if err != nil {
return
}
for _, match := range matches {
name := filepath.Base(match)
if b.Config.Artifacts == nil {
b.Config.Artifacts = make(map[string]*config.Artifact)
}
log.Debugf(ctx, "Adding an artifact block for %s", match)
b.Config.Artifacts[name] = &config.Artifact{
Files: []config.ArtifactFile{
{Source: match},
},
Type: config.ArtifactPythonWheel,
}
}
}


@ -0,0 +1,36 @@
package bundle
import (
"context"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/databricks-sdk-go"
)
type ReadOnlyBundle struct {
b *Bundle
}
func ReadOnly(b *Bundle) ReadOnlyBundle {
return ReadOnlyBundle{b: b}
}
func (r ReadOnlyBundle) Config() config.Root {
return r.b.Config
}
func (r ReadOnlyBundle) RootPath() string {
return r.b.RootPath
}
func (r ReadOnlyBundle) WorkspaceClient() *databricks.WorkspaceClient {
return r.b.WorkspaceClient()
}
func (r ReadOnlyBundle) CacheDir(ctx context.Context, paths ...string) (string, error) {
return r.b.CacheDir(ctx, paths...)
}
func (r ReadOnlyBundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) {
return r.b.GetSyncIncludePatterns(ctx)
}


@ -6,5 +6,5 @@ type Deployment struct {
  FailOnActiveRuns bool `json:"fail_on_active_runs,omitempty"`

  // Lock configures locking behavior on deployment.
- Lock Lock `json:"lock" bundle:"readonly"`
+ Lock Lock `json:"lock"`
}


@ -10,6 +10,19 @@ type Experimental struct {
// In this case the configured wheel task will be deployed as a notebook task which install defined wheel in runtime and executes it.
// For more details see https://github.com/databricks/cli/pull/797 and https://github.com/databricks/cli/pull/635
PythonWheelWrapper bool `json:"python_wheel_wrapper,omitempty"`
// Enable legacy run_as behavior. That is:
// - Set the run_as identity as the owner of any pipelines in the bundle.
// - Do not error in the presence of resources that do not support run_as.
// As of April 2024 this includes pipelines and model serving endpoints.
//
// This mode of run_as requires the deploying user to be a workspace and metastore
// admin. Use of this flag is not recommended for new bundles, and it is only provided
// to unblock customers that are stuck due to breaking changes in the run_as behavior
// made in https://github.com/databricks/cli/pull/1233. This flag might
// be removed in the future once we have a proper workaround like allowing IS_OWNER
// as a top-level permission in the DAB.
UseLegacyRunAs bool `json:"use_legacy_run_as,omitempty"`
}

type Command string


@ -1,7 +1,7 @@
package config

type Lock struct {
- // Enabled toggles deployment lock. True by default.
+ // Enabled toggles deployment lock. True by default except in development mode.
  // Use a pointer value so that only explicitly configured values are set
  // and we don't merge configuration with zero-initialized values.
  Enabled *bool `json:"enabled,omitempty"`
@ -11,9 +11,20 @@ type Lock struct {
  Force bool `json:"force,omitempty"`
}

+// IsEnabled checks if the deployment lock is enabled.
func (lock Lock) IsEnabled() bool {
  if lock.Enabled != nil {
    return *lock.Enabled
  }
  return true
}
// IsExplicitlyEnabled checks if the deployment lock is explicitly enabled.
// Only returns true if locking is explicitly set using a command-line
// flag or configuration file.
func (lock Lock) IsExplicitlyEnabled() bool {
if lock.Enabled != nil {
return *lock.Enabled
}
return false
}
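
To make the two accessors concrete: IsEnabled treats an unset value as "locking on", while IsExplicitlyEnabled only reports true when the user actually set it. A minimal sketch, assuming the bundle/config import path used elsewhere in this diff:

package main

import (
	"fmt"

	"github.com/databricks/cli/bundle/config"
)

func main() {
	var lock config.Lock // Enabled is nil: nothing was configured.
	fmt.Println(lock.IsEnabled())           // true: locking defaults to on
	fmt.Println(lock.IsExplicitlyEnabled()) // false: the user never asked for it

	enabled := true
	lock.Enabled = &enabled
	fmt.Println(lock.IsExplicitlyEnabled()) // true: now explicitly configured
}

Development mode relies on exactly this distinction later in this diff: it disables the deployment lock unless the lock was explicitly enabled.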


@ -0,0 +1,38 @@
package mutator
import (
"context"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/databricks-sdk-go/service/jobs"
)
type defaultQueueing struct{}
func DefaultQueueing() bundle.Mutator {
return &defaultQueueing{}
}
func (m *defaultQueueing) Name() string {
return "DefaultQueueing"
}
// Enable queueing for jobs by default, following the behavior from API 2.2+.
// As of 2024-04, we're still using API 2.1 which has queueing disabled by default.
// This mutator makes sure queueing is enabled by default before we can adopt API 2.2.
func (m *defaultQueueing) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
r := b.Config.Resources
for i := range r.Jobs {
if r.Jobs[i].JobSettings == nil {
r.Jobs[i].JobSettings = &jobs.JobSettings{}
}
if r.Jobs[i].Queue != nil {
continue
}
r.Jobs[i].Queue = &jobs.QueueSettings{
Enabled: true,
}
}
return nil
}


@ -0,0 +1,95 @@
package mutator
import (
"context"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/stretchr/testify/assert"
)
func TestDefaultQueueing(t *testing.T) {
m := DefaultQueueing()
assert.IsType(t, &defaultQueueing{}, m)
}
func TestDefaultQueueingName(t *testing.T) {
m := DefaultQueueing()
assert.Equal(t, "DefaultQueueing", m.Name())
}
func TestDefaultQueueingApplyNoJobs(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{},
},
}
d := bundle.Apply(context.Background(), b, DefaultQueueing())
assert.Len(t, d, 0)
assert.Len(t, b.Config.Resources.Jobs, 0)
}
func TestDefaultQueueingApplyJobsAlreadyEnabled(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job": {
JobSettings: &jobs.JobSettings{
Queue: &jobs.QueueSettings{Enabled: true},
},
},
},
},
},
}
d := bundle.Apply(context.Background(), b, DefaultQueueing())
assert.Len(t, d, 0)
assert.True(t, b.Config.Resources.Jobs["job"].Queue.Enabled)
}
func TestDefaultQueueingApplyEnableQueueing(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job": {},
},
},
},
}
d := bundle.Apply(context.Background(), b, DefaultQueueing())
assert.Len(t, d, 0)
assert.NotNil(t, b.Config.Resources.Jobs["job"].Queue)
assert.True(t, b.Config.Resources.Jobs["job"].Queue.Enabled)
}
func TestDefaultQueueingApplyWithMultipleJobs(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job1": {
JobSettings: &jobs.JobSettings{
Queue: &jobs.QueueSettings{Enabled: false},
},
},
"job2": {},
"job3": {
JobSettings: &jobs.JobSettings{
Queue: &jobs.QueueSettings{Enabled: true},
},
},
},
},
},
}
d := bundle.Apply(context.Background(), b, DefaultQueueing())
assert.Len(t, d, 0)
assert.False(t, b.Config.Resources.Jobs["job1"].Queue.Enabled)
assert.True(t, b.Config.Resources.Jobs["job2"].Queue.Enabled)
assert.True(t, b.Config.Resources.Jobs["job3"].Queue.Enabled)
}


@ -9,6 +9,7 @@ import (
"github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/libs/auth" "github.com/databricks/cli/libs/auth"
"github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/dyn"
"github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/databricks/databricks-sdk-go/service/ml" "github.com/databricks/databricks-sdk-go/service/ml"
@ -29,9 +30,16 @@ func (m *processTargetMode) Name() string {
// Mark all resources as being for 'development' purposes, i.e.
// changing their their name, adding tags, and (in the future)
// marking them as 'hidden' in the UI.
-func transformDevelopmentMode(b *bundle.Bundle) diag.Diagnostics {
- r := b.Config.Resources
+func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+ if !b.Config.Bundle.Deployment.Lock.IsExplicitlyEnabled() {
+   log.Infof(ctx, "Development mode: disabling deployment lock since bundle.deployment.lock.enabled is not set to true")
+   err := disableDeploymentLock(b)
+   if err != nil {
+     return diag.FromErr(err)
+   }
+ }
+
+ r := b.Config.Resources

  shortName := b.Config.Workspace.CurrentUser.ShortName
  prefix := "[dev " + shortName + "] "
@ -100,6 +108,14 @@ func transformDevelopmentMode(b *bundle.Bundle) diag.Diagnostics {
  return nil
}
func disableDeploymentLock(b *bundle.Bundle) error {
return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
return dyn.Map(v, "bundle.deployment.lock", func(_ dyn.Path, v dyn.Value) (dyn.Value, error) {
return dyn.Set(v, "enabled", dyn.V(false))
})
})
}
func validateDevelopmentMode(b *bundle.Bundle) diag.Diagnostics {
  if path := findNonUserPath(b); path != "" {
    return diag.Errorf("%s must start with '~/' or contain the current username when using 'mode: development'", path)
@ -163,7 +179,7 @@ func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) diag.Di
    if diags != nil {
      return diags
    }
-   return transformDevelopmentMode(b)
+   return transformDevelopmentMode(ctx, b)
  case config.Production:
    isPrincipal := auth.IsServicePrincipal(b.Config.Workspace.CurrentUser.UserName)
    return validateProductionMode(ctx, b, isPrincipal)


@ -301,3 +301,23 @@ func TestAllResourcesRenamed(t *testing.T) {
    }
  }
}
func TestDisableLocking(t *testing.T) {
ctx := context.Background()
b := mockBundle(config.Development)
err := transformDevelopmentMode(ctx, b)
require.Nil(t, err)
assert.False(t, b.Config.Bundle.Deployment.Lock.IsEnabled())
}
func TestDisableLockingDisabled(t *testing.T) {
ctx := context.Background()
b := mockBundle(config.Development)
explicitlyEnabled := true
b.Config.Bundle.Deployment.Lock.Enabled = &explicitlyEnabled
err := transformDevelopmentMode(ctx, b)
require.Nil(t, err)
assert.True(t, b.Config.Bundle.Deployment.Lock.IsEnabled(), "Deployment lock should remain enabled in development mode when explicitly enabled")
}


@ -133,3 +133,64 @@ func TestResolveServicePrincipal(t *testing.T) {
  require.NoError(t, diags.Error())
  require.Equal(t, "app-1234", *b.Config.Variables["my-sp"].Value)
}
func TestResolveVariableReferencesInVariableLookups(t *testing.T) {
s := func(s string) *string {
return &s
}
b := &bundle.Bundle{
Config: config.Root{
Bundle: config.Bundle{
Target: "dev",
},
Variables: map[string]*variable.Variable{
"foo": {
Value: s("bar"),
},
"lookup": {
Lookup: &variable.Lookup{
Cluster: "cluster-${var.foo}-${bundle.target}",
},
},
},
},
}
m := mocks.NewMockWorkspaceClient(t)
b.SetWorkpaceClient(m.WorkspaceClient)
clusterApi := m.GetMockClustersAPI()
clusterApi.EXPECT().GetByClusterName(mock.Anything, "cluster-bar-dev").Return(&compute.ClusterDetails{
ClusterId: "1234-5678-abcd",
}, nil)
diags := bundle.Apply(context.Background(), b, bundle.Seq(ResolveVariableReferencesInLookup(), ResolveResourceReferences()))
require.NoError(t, diags.Error())
require.Equal(t, "cluster-bar-dev", b.Config.Variables["lookup"].Lookup.Cluster)
require.Equal(t, "1234-5678-abcd", *b.Config.Variables["lookup"].Value)
}
func TestResolveLookupVariableReferencesInVariableLookups(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Variables: map[string]*variable.Variable{
"another_lookup": {
Lookup: &variable.Lookup{
Cluster: "cluster",
},
},
"lookup": {
Lookup: &variable.Lookup{
Cluster: "cluster-${var.another_lookup}",
},
},
},
},
}
m := mocks.NewMockWorkspaceClient(t)
b.SetWorkpaceClient(m.WorkspaceClient)
diags := bundle.Apply(context.Background(), b, bundle.Seq(ResolveVariableReferencesInLookup(), ResolveResourceReferences()))
require.ErrorContains(t, diags.Error(), "lookup variables cannot contain references to another lookup variables")
}


@ -2,8 +2,10 @@ package mutator
import (
  "context"
+ "fmt"

  "github.com/databricks/cli/bundle"
+ "github.com/databricks/cli/bundle/config/variable"
  "github.com/databricks/cli/libs/diag"
  "github.com/databricks/cli/libs/dyn"
  "github.com/databricks/cli/libs/dyn/convert"
@ -13,10 +15,50 @@ import (
type resolveVariableReferences struct {
  prefixes []string
+ pattern  dyn.Pattern
+ lookupFn func(dyn.Value, dyn.Path) (dyn.Value, error)
}

func ResolveVariableReferences(prefixes ...string) bundle.Mutator {
- return &resolveVariableReferences{prefixes: prefixes}
+ return &resolveVariableReferences{prefixes: prefixes, lookupFn: lookup}
}
func ResolveVariableReferencesInLookup() bundle.Mutator {
return &resolveVariableReferences{prefixes: []string{
"bundle",
"workspace",
"variables",
}, pattern: dyn.NewPattern(dyn.Key("variables"), dyn.AnyKey(), dyn.Key("lookup")), lookupFn: lookupForVariables}
}
func lookup(v dyn.Value, path dyn.Path) (dyn.Value, error) {
// Future opportunity: if we lookup this path in both the given root
// and the synthesized root, we know if it was explicitly set or implied to be empty.
// Then we can emit a warning if it was not explicitly set.
return dyn.GetByPath(v, path)
}
func lookupForVariables(v dyn.Value, path dyn.Path) (dyn.Value, error) {
if path[0].Key() != "variables" {
return lookup(v, path)
}
varV, err := dyn.GetByPath(v, path[:len(path)-1])
if err != nil {
return dyn.InvalidValue, err
}
var vv variable.Variable
err = convert.ToTyped(&vv, varV)
if err != nil {
return dyn.InvalidValue, err
}
if vv.Lookup != nil && vv.Lookup.String() != "" {
return dyn.InvalidValue, fmt.Errorf("lookup variables cannot contain references to another lookup variables")
}
return lookup(v, path)
} }
func (*resolveVariableReferences) Name() string {
@ -48,37 +90,35 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle)
  //
  // This is consistent with the behavior prior to using the dynamic value system.
  //
- // We can ignore the diagnostics return valuebecause we know that the dynamic value
+ // We can ignore the diagnostics return value because we know that the dynamic value
  // has already been normalized when it was first loaded from the configuration file.
  //
  normalized, _ := convert.Normalize(b.Config, root, convert.IncludeMissingFields)
- lookup := func(path dyn.Path) (dyn.Value, error) {
-   // Future opportunity: if we lookup this path in both the given root
-   // and the synthesized root, we know if it was explicitly set or implied to be empty.
-   // Then we can emit a warning if it was not explicitly set.
-   return dyn.GetByPath(normalized, path)
- }
-
- // Resolve variable references in all values.
- root, err := dynvar.Resolve(root, func(path dyn.Path) (dyn.Value, error) {
+ // If the pattern is nil, we resolve references in the entire configuration.
+ root, err := dyn.MapByPattern(root, m.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
+   // Resolve variable references in all values.
+   return dynvar.Resolve(v, func(path dyn.Path) (dyn.Value, error) {
      // Rewrite the shorthand path ${var.foo} into ${variables.foo.value}.
      if path.HasPrefix(varPath) && len(path) == 2 {
        path = dyn.NewPath(
          dyn.Key("variables"),
          path[1],
          dyn.Key("value"),
        )
      }

      // Perform resolution only if the path starts with one of the specified prefixes.
      for _, prefix := range prefixes {
        if path.HasPrefix(prefix) {
-         return lookup(path)
+         return m.lookupFn(normalized, path)
        }
      }

      return dyn.InvalidValue, dynvar.ErrSkipResolution
+   })
  })
  if err != nil {
    return dyn.InvalidValue, err
  }


@ -3,8 +3,10 @@ package mutator
import (
  "context"
  "fmt"
+ "slices"

  "github.com/databricks/cli/bundle"
+ "github.com/databricks/cli/bundle/config/resources"
  "github.com/databricks/cli/libs/diag"
  "github.com/databricks/cli/libs/dyn"
  "github.com/databricks/databricks-sdk-go/service/jobs"
@ -87,19 +89,12 @@ func validateRunAs(b *bundle.Bundle) diag.Diagnostics {
  return nil
}

-func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
- // Mutator is a no-op if run_as is not specified in the bundle
+func setRunAsForJobs(b *bundle.Bundle) {
  runAs := b.Config.RunAs
  if runAs == nil {
-   return nil
+   return
  }

- // Assert the run_as configuration is valid in the context of the bundle
- if diag := validateRunAs(b); diag != nil {
-   return diag
- }
-
- // Set run_as for jobs
  for i := range b.Config.Resources.Jobs {
    job := b.Config.Resources.Jobs[i]
    if job.RunAs != nil {
@ -110,6 +105,63 @@ func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
      UserName: runAs.UserName,
    }
  }
}
// Legacy behavior of run_as for DLT pipelines. Available under the experimental.use_run_as_legacy flag.
// Only available to unblock customers stuck due to breaking changes in https://github.com/databricks/cli/pull/1233
func setPipelineOwnersToRunAsIdentity(b *bundle.Bundle) {
runAs := b.Config.RunAs
if runAs == nil {
return
}
me := b.Config.Workspace.CurrentUser.UserName
// If user deploying the bundle and the one defined in run_as are the same
// Do not add IS_OWNER permission. Current user is implied to be an owner in this case.
// Otherwise, it will fail due to this bug https://github.com/databricks/terraform-provider-databricks/issues/2407
if runAs.UserName == me || runAs.ServicePrincipalName == me {
return
}
for i := range b.Config.Resources.Pipelines {
pipeline := b.Config.Resources.Pipelines[i]
pipeline.Permissions = slices.DeleteFunc(pipeline.Permissions, func(p resources.Permission) bool {
return (runAs.ServicePrincipalName != "" && p.ServicePrincipalName == runAs.ServicePrincipalName) ||
(runAs.UserName != "" && p.UserName == runAs.UserName)
})
pipeline.Permissions = append(pipeline.Permissions, resources.Permission{
Level: "IS_OWNER",
ServicePrincipalName: runAs.ServicePrincipalName,
UserName: runAs.UserName,
})
}
}
func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
// Mutator is a no-op if run_as is not specified in the bundle
runAs := b.Config.RunAs
if runAs == nil {
return nil
}
if b.Config.Experimental != nil && b.Config.Experimental.UseLegacyRunAs {
setPipelineOwnersToRunAsIdentity(b)
setRunAsForJobs(b)
return diag.Diagnostics{
{
Severity: diag.Warning,
Summary: "You are using the legacy mode of run_as. The support for this mode is experimental and might be removed in a future release of the CLI. In order to run the DLT pipelines in your DAB as the run_as user this mode changes the owners of the pipelines to the run_as identity, which requires the user deploying the bundle to be a workspace admin, and also a Metastore admin if the pipeline target is in UC.",
Path: dyn.MustPathFromString("experimental.use_legacy_run_as"),
Location: b.Config.GetLocation("experimental.use_legacy_run_as"),
},
}
}
// Assert the run_as configuration is valid in the context of the bundle
if err := validateRunAs(b); err != nil {
return err
}
setRunAsForJobs(b)
  return nil
}


@ -178,11 +178,6 @@ func TestRunAsErrorForUnsupportedResources(t *testing.T) {
      Config: *r,
    }
    diags := bundle.Apply(context.Background(), b, SetRunAs())
-   assert.Equal(t, diags.Error().Error(), errUnsupportedResourceTypeForRunAs{
-     resourceType:     rt,
-     resourceLocation: dyn.Location{},
-     currentUser:      "alice",
-     runAsUser:        "bob",
-   }.Error(), "expected run_as with a different identity than the current deployment user to not supported for resources of type: %s", rt)
+   assert.Contains(t, diags.Error().Error(), "identity", rt)
  }
}


@ -152,6 +152,13 @@ func translateNoOp(literal, localFullPath, localRelPath, remotePath string) (str
  return localRelPath, nil
}
func translateNoOpWithPrefix(literal, localFullPath, localRelPath, remotePath string) (string, error) {
if !strings.HasPrefix(localRelPath, ".") {
localRelPath = "." + string(filepath.Separator) + localRelPath
}
return localRelPath, nil
}
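
translateNoOpWithPrefix leaves the path itself alone but guarantees an explicit "./" prefix; the TestTranslatePathJobEnvironments test later in this diff asserts exactly that for relative wheel dependencies. A self-contained sketch of just the normalization step, using only the standard library:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	for _, p := range []string{"dist/env1.whl", "./dist/env2.whl"} {
		out := p
		// Relative dependency paths gain a "./" (or ".\" on Windows) prefix;
		// paths that already start with "." are left untouched.
		if !strings.HasPrefix(out, ".") {
			out = "." + string(filepath.Separator) + out
		}
		fmt.Println(p, "->", out)
	}
}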
func (m *translatePaths) rewriteValue(b *bundle.Bundle, p dyn.Path, v dyn.Value, fn rewriteFunc, dir string) (dyn.Value, error) {
  out := v.MustString()
  err := m.rewritePath(dir, b, &out, fn)


@ -5,39 +5,51 @@ import (
"slices" "slices"
"github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/libraries"
"github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/dyn"
) )
type jobTaskRewritePattern struct { type jobRewritePattern struct {
pattern dyn.Pattern pattern dyn.Pattern
fn rewriteFunc fn rewriteFunc
skipRewrite func(string) bool
} }
func rewritePatterns(base dyn.Pattern) []jobTaskRewritePattern { func noSkipRewrite(string) bool {
return []jobTaskRewritePattern{ return false
}
func rewritePatterns(base dyn.Pattern) []jobRewritePattern {
return []jobRewritePattern{
{ {
base.Append(dyn.Key("notebook_task"), dyn.Key("notebook_path")), base.Append(dyn.Key("notebook_task"), dyn.Key("notebook_path")),
translateNotebookPath, translateNotebookPath,
noSkipRewrite,
}, },
{ {
base.Append(dyn.Key("spark_python_task"), dyn.Key("python_file")), base.Append(dyn.Key("spark_python_task"), dyn.Key("python_file")),
translateFilePath, translateFilePath,
noSkipRewrite,
}, },
{ {
base.Append(dyn.Key("dbt_task"), dyn.Key("project_directory")), base.Append(dyn.Key("dbt_task"), dyn.Key("project_directory")),
translateDirectoryPath, translateDirectoryPath,
noSkipRewrite,
}, },
{ {
base.Append(dyn.Key("sql_task"), dyn.Key("file"), dyn.Key("path")), base.Append(dyn.Key("sql_task"), dyn.Key("file"), dyn.Key("path")),
translateFilePath, translateFilePath,
noSkipRewrite,
}, },
{ {
base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("whl")), base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("whl")),
translateNoOp, translateNoOp,
noSkipRewrite,
}, },
{ {
base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("jar")), base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("jar")),
translateNoOp, translateNoOp,
noSkipRewrite,
}, },
} }
} }
@ -73,9 +85,28 @@ func (m *translatePaths) applyJobTranslations(b *bundle.Bundle, v dyn.Value) (dy
  )

  // Compile list of patterns and their respective rewrite functions.
jobEnvironmentsPatterns := []jobRewritePattern{
{
dyn.NewPattern(
dyn.Key("resources"),
dyn.Key("jobs"),
dyn.AnyKey(),
dyn.Key("environments"),
dyn.AnyIndex(),
dyn.Key("spec"),
dyn.Key("dependencies"),
dyn.AnyIndex(),
),
translateNoOpWithPrefix,
func(s string) bool {
return !libraries.IsEnvironmentDependencyLocal(s)
},
},
}
  taskPatterns := rewritePatterns(base)
  forEachPatterns := rewritePatterns(base.Append(dyn.Key("for_each_task"), dyn.Key("task")))
- allPatterns := append(taskPatterns, forEachPatterns...)
+ allPatterns := append(taskPatterns, jobEnvironmentsPatterns...)
+ allPatterns = append(allPatterns, forEachPatterns...)

  for _, t := range allPatterns {
    v, err = dyn.MapByPattern(v, t.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
@ -91,6 +122,10 @@ func (m *translatePaths) applyJobTranslations(b *bundle.Bundle, v dyn.Value) (dy
      return dyn.InvalidValue, fmt.Errorf("unable to determine directory for job %s: %w", key, err)
    }
sv := v.MustString()
if t.skipRewrite(sv) {
return v, nil
}
      return m.rewriteRelativeTo(b, p, v, t.fn, dir, fallback[key])
    })
    if err != nil {


@ -4,6 +4,7 @@ import (
"context" "context"
"os" "os"
"path/filepath" "path/filepath"
"strings"
"testing" "testing"
"github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle"
@ -651,3 +652,45 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) {
  diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
  assert.ErrorContains(t, diags.Error(), `expected a file for "resources.pipelines.pipeline.libraries[0].file.path" but got a notebook`)
}
func TestTranslatePathJobEnvironments(t *testing.T) {
dir := t.TempDir()
touchEmptyFile(t, filepath.Join(dir, "env1.py"))
touchEmptyFile(t, filepath.Join(dir, "env2.py"))
b := &bundle.Bundle{
RootPath: dir,
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job": {
JobSettings: &jobs.JobSettings{
Environments: []jobs.JobEnvironment{
{
Spec: &compute.Environment{
Dependencies: []string{
"./dist/env1.whl",
"../dist/env2.whl",
"simplejson",
"/Workspace/Users/foo@bar.com/test.whl",
},
},
},
},
},
},
},
},
},
}
bundletest.SetLocation(b, "resources.jobs", filepath.Join(dir, "job/resource.yml"))
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
require.NoError(t, diags.Error())
assert.Equal(t, strings.Join([]string{".", "job", "dist", "env1.whl"}, string(os.PathSeparator)), b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[0])
assert.Equal(t, strings.Join([]string{".", "dist", "env2.whl"}, string(os.PathSeparator)), b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1])
assert.Equal(t, "simplejson", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[2])
assert.Equal(t, "/Workspace/Users/foo@bar.com/test.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[3])
}


@ -143,6 +143,18 @@ func (r *Root) updateWithDynamicValue(nv dyn.Value) error {
  return nil
}
// Mutate applies a transformation to the dynamic configuration value of a Root object.
//
// Parameters:
// - fn: A function that mutates a dyn.Value object
//
// Example usage, setting bundle.deployment.lock.enabled to false:
//
// err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
// return dyn.Map(v, "bundle.deployment.lock", func(_ dyn.Path, v dyn.Value) (dyn.Value, error) {
// return dyn.Set(v, "enabled", dyn.V(false))
// })
// })
func (r *Root) Mutate(fn func(dyn.Value) (dyn.Value, error)) error {
  err := r.initializeDynamicValue()
  if err != nil {
@ -440,7 +452,7 @@ func validateVariableOverrides(root, target dyn.Value) (err error) {
// Best effort to get the location of configuration value at the specified path.
// This function is useful to annotate error messages with the location, because
// we don't want to fail with a different error message if we cannot retrieve the location.
-func (r *Root) GetLocation(path string) dyn.Location {
+func (r Root) GetLocation(path string) dyn.Location {
  v, err := dyn.Get(r.value, path)
  if err != nil {
    return dyn.Location{}


@ -0,0 +1,54 @@
package validate
import (
"context"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/deploy/files"
"github.com/databricks/cli/libs/diag"
)
func FilesToSync() bundle.ReadOnlyMutator {
return &filesToSync{}
}
type filesToSync struct {
}
func (v *filesToSync) Name() string {
return "validate:files_to_sync"
}
func (v *filesToSync) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics {
sync, err := files.GetSync(ctx, rb)
if err != nil {
return diag.FromErr(err)
}
fl, err := sync.GetFileList(ctx)
if err != nil {
return diag.FromErr(err)
}
if len(fl) != 0 {
return nil
}
diags := diag.Diagnostics{}
if len(rb.Config().Sync.Exclude) == 0 {
diags = diags.Append(diag.Diagnostic{
Severity: diag.Warning,
Summary: "There are no files to sync, please check your .gitignore",
})
} else {
loc := location{path: "sync.exclude", rb: rb}
diags = diags.Append(diag.Diagnostic{
Severity: diag.Warning,
Summary: "There are no files to sync, please check your .gitignore and sync.exclude configuration",
Location: loc.Location(),
Path: loc.Path(),
})
}
return diags
}


@ -0,0 +1,53 @@
package validate
import (
"context"
"fmt"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/diag"
)
func JobClusterKeyDefined() bundle.ReadOnlyMutator {
return &jobClusterKeyDefined{}
}
type jobClusterKeyDefined struct {
}
func (v *jobClusterKeyDefined) Name() string {
return "validate:job_cluster_key_defined"
}
func (v *jobClusterKeyDefined) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics {
diags := diag.Diagnostics{}
for k, job := range rb.Config().Resources.Jobs {
jobClusterKeys := make(map[string]bool)
for _, cluster := range job.JobClusters {
if cluster.JobClusterKey != "" {
jobClusterKeys[cluster.JobClusterKey] = true
}
}
for index, task := range job.Tasks {
if task.JobClusterKey != "" {
if _, ok := jobClusterKeys[task.JobClusterKey]; !ok {
loc := location{
path: fmt.Sprintf("resources.jobs.%s.tasks[%d].job_cluster_key", k, index),
rb: rb,
}
diags = diags.Append(diag.Diagnostic{
Severity: diag.Warning,
Summary: fmt.Sprintf("job_cluster_key %s is not defined", task.JobClusterKey),
Location: loc.Location(),
Path: loc.Path(),
})
}
}
}
}
return diags
}


@ -0,0 +1,97 @@
package validate
import (
"context"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/stretchr/testify/require"
)
func TestJobClusterKeyDefined(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job1": {
JobSettings: &jobs.JobSettings{
Name: "job1",
JobClusters: []jobs.JobCluster{
{JobClusterKey: "do-not-exist"},
},
Tasks: []jobs.Task{
{JobClusterKey: "do-not-exist"},
},
},
},
},
},
},
}
diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), JobClusterKeyDefined())
require.Len(t, diags, 0)
require.NoError(t, diags.Error())
}
func TestJobClusterKeyNotDefined(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job1": {
JobSettings: &jobs.JobSettings{
Name: "job1",
Tasks: []jobs.Task{
{JobClusterKey: "do-not-exist"},
},
},
},
},
},
},
}
diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), JobClusterKeyDefined())
require.Len(t, diags, 1)
require.NoError(t, diags.Error())
require.Equal(t, diags[0].Severity, diag.Warning)
require.Equal(t, diags[0].Summary, "job_cluster_key do-not-exist is not defined")
}
func TestJobClusterKeyDefinedInDifferentJob(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job1": {
JobSettings: &jobs.JobSettings{
Name: "job1",
Tasks: []jobs.Task{
{JobClusterKey: "do-not-exist"},
},
},
},
"job2": {
JobSettings: &jobs.JobSettings{
Name: "job2",
JobClusters: []jobs.JobCluster{
{JobClusterKey: "do-not-exist"},
},
},
},
},
},
},
}
diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), JobClusterKeyDefined())
require.Len(t, diags, 1)
require.NoError(t, diags.Error())
require.Equal(t, diags[0].Severity, diag.Warning)
require.Equal(t, diags[0].Summary, "job_cluster_key do-not-exist is not defined")
}


@ -0,0 +1,43 @@
package validate
import (
"context"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/dyn"
)
type validate struct {
}
type location struct {
path string
rb bundle.ReadOnlyBundle
}
func (l location) Location() dyn.Location {
return l.rb.Config().GetLocation(l.path)
}
func (l location) Path() dyn.Path {
return dyn.MustPathFromString(l.path)
}
// Apply implements bundle.Mutator.
func (v *validate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
return bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), bundle.Parallel(
JobClusterKeyDefined(),
FilesToSync(),
ValidateSyncPatterns(),
))
}
// Name implements bundle.Mutator.
func (v *validate) Name() string {
return "validate"
}
func Validate() bundle.Mutator {
return &validate{}
}


@ -0,0 +1,79 @@
package validate
import (
"context"
"fmt"
"sync"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/fileset"
"golang.org/x/sync/errgroup"
)
func ValidateSyncPatterns() bundle.ReadOnlyMutator {
return &validateSyncPatterns{}
}
type validateSyncPatterns struct {
}
func (v *validateSyncPatterns) Name() string {
return "validate:validate_sync_patterns"
}
func (v *validateSyncPatterns) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics {
s := rb.Config().Sync
if len(s.Exclude) == 0 && len(s.Include) == 0 {
return nil
}
diags, err := checkPatterns(s.Exclude, "sync.exclude", rb)
if err != nil {
return diag.FromErr(err)
}
includeDiags, err := checkPatterns(s.Include, "sync.include", rb)
if err != nil {
return diag.FromErr(err)
}
return diags.Extend(includeDiags)
}
func checkPatterns(patterns []string, path string, rb bundle.ReadOnlyBundle) (diag.Diagnostics, error) {
var mu sync.Mutex
var errs errgroup.Group
var diags diag.Diagnostics
for i, pattern := range patterns {
index := i
p := pattern
errs.Go(func() error {
fs, err := fileset.NewGlobSet(rb.RootPath(), []string{p})
if err != nil {
return err
}
all, err := fs.All()
if err != nil {
return err
}
if len(all) == 0 {
loc := location{path: fmt.Sprintf("%s[%d]", path, index), rb: rb}
mu.Lock()
diags = diags.Append(diag.Diagnostic{
Severity: diag.Warning,
Summary: fmt.Sprintf("Pattern %s does not match any files", p),
Location: loc.Location(),
Path: loc.Path(),
})
mu.Unlock()
}
return nil
})
}
return diags, errs.Wait()
}


@ -3,10 +3,12 @@ package files
import (
  "context"
  "fmt"
+ "os"

  "github.com/databricks/cli/bundle"
  "github.com/databricks/cli/libs/cmdio"
  "github.com/databricks/cli/libs/diag"
+ "github.com/databricks/cli/libs/sync"
  "github.com/databricks/databricks-sdk-go/service/workspace"
  "github.com/fatih/color"
)
@ -46,20 +48,31 @@ func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
  }

  // Clean up sync snapshot file
- sync, err := GetSync(ctx, b)
- if err != nil {
-   return diag.FromErr(err)
- }
- err = sync.DestroySnapshot(ctx)
+ err = deleteSnapshotFile(ctx, b)
  if err != nil {
    return diag.FromErr(err)
  }

- cmdio.LogString(ctx, fmt.Sprintf("Deleted snapshot file at %s", sync.SnapshotPath()))
  cmdio.LogString(ctx, "Successfully deleted files!")
  return nil
}
func deleteSnapshotFile(ctx context.Context, b *bundle.Bundle) error {
opts, err := GetSyncOptions(ctx, bundle.ReadOnly(b))
if err != nil {
return fmt.Errorf("cannot get sync options: %w", err)
}
sp, err := sync.SnapshotPath(opts)
if err != nil {
return err
}
err = os.Remove(sp)
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to destroy sync snapshot file: %s", err)
}
return nil
}
func Delete() bundle.Mutator {
  return &delete{}
}


@ -8,40 +8,40 @@ import (
"github.com/databricks/cli/libs/sync" "github.com/databricks/cli/libs/sync"
) )
func GetSync(ctx context.Context, b *bundle.Bundle) (*sync.Sync, error) { func GetSync(ctx context.Context, rb bundle.ReadOnlyBundle) (*sync.Sync, error) {
opts, err := GetSyncOptions(ctx, b) opts, err := GetSyncOptions(ctx, rb)
if err != nil { if err != nil {
return nil, fmt.Errorf("cannot get sync options: %w", err) return nil, fmt.Errorf("cannot get sync options: %w", err)
} }
return sync.New(ctx, *opts) return sync.New(ctx, *opts)
} }
func GetSyncOptions(ctx context.Context, b *bundle.Bundle) (*sync.SyncOptions, error) { func GetSyncOptions(ctx context.Context, rb bundle.ReadOnlyBundle) (*sync.SyncOptions, error) {
cacheDir, err := b.CacheDir(ctx) cacheDir, err := rb.CacheDir(ctx)
if err != nil { if err != nil {
return nil, fmt.Errorf("cannot get bundle cache directory: %w", err) return nil, fmt.Errorf("cannot get bundle cache directory: %w", err)
} }
includes, err := b.GetSyncIncludePatterns(ctx) includes, err := rb.GetSyncIncludePatterns(ctx)
if err != nil { if err != nil {
return nil, fmt.Errorf("cannot get list of sync includes: %w", err) return nil, fmt.Errorf("cannot get list of sync includes: %w", err)
} }
opts := &sync.SyncOptions{ opts := &sync.SyncOptions{
LocalPath: b.RootPath, LocalPath: rb.RootPath(),
RemotePath: b.Config.Workspace.FilePath, RemotePath: rb.Config().Workspace.FilePath,
Include: includes, Include: includes,
Exclude: b.Config.Sync.Exclude, Exclude: rb.Config().Sync.Exclude,
Host: b.WorkspaceClient().Config.Host, Host: rb.WorkspaceClient().Config.Host,
Full: false, Full: false,
SnapshotBasePath: cacheDir, SnapshotBasePath: cacheDir,
WorkspaceClient: b.WorkspaceClient(), WorkspaceClient: rb.WorkspaceClient(),
} }
if b.Config.Workspace.CurrentUser != nil { if rb.Config().Workspace.CurrentUser != nil {
opts.CurrentUser = b.Config.Workspace.CurrentUser.User opts.CurrentUser = rb.Config().Workspace.CurrentUser.User
} }
return opts, nil return opts, nil


@ -21,7 +21,7 @@ func (m *upload) Name() string {
func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
  cmdio.LogString(ctx, fmt.Sprintf("Uploading bundle files to %s...", b.Config.Workspace.FilePath))
- sync, err := GetSync(ctx, b)
+ sync, err := GetSync(ctx, bundle.ReadOnly(b))
  if err != nil {
    return diag.FromErr(err)
  }


@ -79,7 +79,7 @@ func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic
  }

  // Create a new snapshot based on the deployment state file.
- opts, err := files.GetSyncOptions(ctx, b)
+ opts, err := files.GetSyncOptions(ctx, bundle.ReadOnly(b))
  if err != nil {
    return diag.FromErr(err)
  }


@ -6,7 +6,6 @@ import (
"encoding/json" "encoding/json"
"io" "io"
"os" "os"
"path/filepath"
"testing" "testing"
"github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle"
@ -77,15 +76,15 @@ func testStatePull(t *testing.T, opts statePullOpts) {
  ctx := context.Background()

  for _, file := range opts.localFiles {
-   testutil.Touch(t, filepath.Join(b.RootPath, "bar"), file)
+   testutil.Touch(t, b.RootPath, "bar", file)
  }

  for _, file := range opts.localNotebooks {
-   testutil.TouchNotebook(t, filepath.Join(b.RootPath, "bar"), file)
+   testutil.TouchNotebook(t, b.RootPath, "bar", file)
  }

  if opts.withExistingSnapshot {
-   opts, err := files.GetSyncOptions(ctx, b)
+   opts, err := files.GetSyncOptions(ctx, bundle.ReadOnly(b))
    require.NoError(t, err)

    snapshotPath, err := sync.SnapshotPath(opts)
@ -127,7 +126,7 @@ func testStatePull(t *testing.T, opts statePullOpts) {
  }

  if opts.expects.snapshotState != nil {
-   syncOpts, err := files.GetSyncOptions(ctx, b)
+   syncOpts, err := files.GetSyncOptions(ctx, bundle.ReadOnly(b))
    require.NoError(t, err)

    snapshotPath, err := sync.SnapshotPath(syncOpts)


@ -39,7 +39,7 @@ func (s *stateUpdate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnost
  state.Version = DeploymentStateVersion

  // Get the current file list.
- sync, err := files.GetSync(ctx, b)
+ sync, err := files.GetSync(ctx, bundle.ReadOnly(b))
  if err != nil {
    return diag.FromErr(err)
  }


@ -399,7 +399,7 @@ func TestGetEnvVarWithMatchingVersion(t *testing.T) {
  versionVarName := "FOO_VERSION"
  tmp := t.TempDir()
- testutil.Touch(t, tmp, "bar")
+ file := testutil.Touch(t, tmp, "bar")

  var tc = []struct {
    envValue string
@ -408,19 +408,19 @@
    expected string
  }{
    {
-     envValue:       filepath.Join(tmp, "bar"),
+     envValue:       file,
      versionValue:   "1.2.3",
      currentVersion: "1.2.3",
-     expected:       filepath.Join(tmp, "bar"),
+     expected:       file,
    },
    {
-     envValue:       filepath.Join(tmp, "does-not-exist"),
+     envValue:       "does-not-exist",
      versionValue:   "1.2.3",
      currentVersion: "1.2.3",
      expected:       "",
    },
    {
-     envValue:       filepath.Join(tmp, "bar"),
+     envValue:       file,
      versionValue:   "1.2.3",
      currentVersion: "1.2.4",
      expected:       "",
@ -432,10 +432,10 @@
      expected:       "",
    },
    {
-     envValue:       filepath.Join(tmp, "bar"),
+     envValue:       file,
      versionValue:   "",
      currentVersion: "1.2.3",
-     expected:       filepath.Join(tmp, "bar"),
+     expected:       file,
    },
  }

View File

@ -15,18 +15,40 @@ const TerraformVersionEnv = "DATABRICKS_TF_VERSION"
const TerraformCliConfigPathEnv = "DATABRICKS_TF_CLI_CONFIG_FILE" const TerraformCliConfigPathEnv = "DATABRICKS_TF_CLI_CONFIG_FILE"
const TerraformProviderVersionEnv = "DATABRICKS_TF_PROVIDER_VERSION" const TerraformProviderVersionEnv = "DATABRICKS_TF_PROVIDER_VERSION"
// Terraform CLI version to use and the corresponding checksums for it. The
// checksums are used to verify the integrity of the downloaded binary. Please
// update the checksums when the Terraform version is updated. The checksums
// were obtained from https://releases.hashicorp.com/terraform/1.5.5.
//
// These hashes are not used inside the CLI. They are co-located here only so they
// can be surfaced in the "databricks bundle debug terraform" output. Downstream applications
// like the CLI Docker image use these checksums to verify the integrity of the
// downloaded Terraform archive.
var TerraformVersion = version.Must(version.NewVersion("1.5.5")) var TerraformVersion = version.Must(version.NewVersion("1.5.5"))
const checksumLinuxArm64 = "b055aefe343d0b710d8a7afd31aeb702b37bbf4493bb9385a709991e48dfbcd2"
const checksumLinuxAmd64 = "ad0c696c870c8525357b5127680cd79c0bdf58179af9acd091d43b1d6482da4a"
type Checksum struct {
LinuxArm64 string `json:"linux_arm64"`
LinuxAmd64 string `json:"linux_amd64"`
}
type TerraformMetadata struct { type TerraformMetadata struct {
Version string `json:"version"` Version string `json:"version"`
ProviderHost string `json:"providerHost"` Checksum Checksum `json:"checksum"`
ProviderSource string `json:"providerSource"` ProviderHost string `json:"providerHost"`
ProviderVersion string `json:"providerVersion"` ProviderSource string `json:"providerSource"`
ProviderVersion string `json:"providerVersion"`
} }
func NewTerraformMetadata() *TerraformMetadata { func NewTerraformMetadata() *TerraformMetadata {
return &TerraformMetadata{ return &TerraformMetadata{
Version: TerraformVersion.String(), Version: TerraformVersion.String(),
Checksum: Checksum{
LinuxArm64: checksumLinuxArm64,
LinuxAmd64: checksumLinuxAmd64,
},
ProviderHost: schema.ProviderHost, ProviderHost: schema.ProviderHost,
ProviderSource: schema.ProviderSource, ProviderSource: schema.ProviderSource,
ProviderVersion: schema.ProviderVersion, ProviderVersion: schema.ProviderVersion,
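The checksum fields above exist so that downstream consumers of the "databricks bundle debug terraform" output can verify a Terraform archive they download themselves. Below is a minimal sketch of such a consumer, assuming the command's JSON output mirrors the TerraformMetadata struct above; the metadata.json path and the verifyArchive helper are illustrative only, not part of the CLI.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"os"
)

// terraformMetadata mirrors the assumed JSON shape of the debug output.
type terraformMetadata struct {
	Version  string `json:"version"`
	Checksum struct {
		LinuxArm64 string `json:"linux_arm64"`
		LinuxAmd64 string `json:"linux_amd64"`
	} `json:"checksum"`
}

// verifyArchive compares the SHA-256 of a downloaded archive against the expected checksum.
func verifyArchive(path, expected string) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	sum := sha256.Sum256(data)
	if hex.EncodeToString(sum[:]) != expected {
		return fmt.Errorf("checksum mismatch for %s", path)
	}
	return nil
}

func main() {
	// Hypothetical file holding the captured "databricks bundle debug terraform" output.
	raw, err := os.ReadFile("metadata.json")
	if err != nil {
		panic(err)
	}
	var md terraformMetadata
	if err := json.Unmarshal(raw, &md); err != nil {
		panic(err)
	}
	if err := verifyArchive("terraform_linux_amd64.zip", md.Checksum.LinuxAmd64); err != nil {
		panic(err)
	}
	fmt.Println("terraform", md.Version, "archive verified")
}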

View File

@ -0,0 +1,51 @@
package terraform
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func downloadAndChecksum(t *testing.T, url string, expectedChecksum string) {
resp, err := http.Get(url)
require.NoError(t, err)
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Fatalf("failed to download %s: %s", url, resp.Status)
}
tmpDir := t.TempDir()
tmpFile, err := os.Create(filepath.Join(tmpDir, "archive.zip"))
require.NoError(t, err)
defer tmpFile.Close()
_, err = io.Copy(tmpFile, resp.Body)
require.NoError(t, err)
_, err = tmpFile.Seek(0, 0) // go back to the start of the file
require.NoError(t, err)
hash := sha256.New()
_, err = io.Copy(hash, tmpFile)
require.NoError(t, err)
checksum := hex.EncodeToString(hash.Sum(nil))
assert.Equal(t, expectedChecksum, checksum)
}
func TestTerraformArchiveChecksums(t *testing.T) {
armUrl := fmt.Sprintf("https://releases.hashicorp.com/terraform/%s/terraform_%s_linux_arm64.zip", TerraformVersion, TerraformVersion)
amdUrl := fmt.Sprintf("https://releases.hashicorp.com/terraform/%s/terraform_%s_linux_amd64.zip", TerraformVersion, TerraformVersion)
downloadAndChecksum(t, amdUrl, checksumLinuxAmd64)
downloadAndChecksum(t, armUrl, checksumLinuxArm64)
}

View File

@ -24,6 +24,7 @@ func convertJobResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) {
"tasks": "task", "tasks": "task",
"job_clusters": "job_cluster", "job_clusters": "job_cluster",
"parameters": "parameter", "parameters": "parameter",
"environments": "environment",
}) })
if err != nil { if err != nil {
return dyn.InvalidValue, err return dyn.InvalidValue, err

View File

@ -1,3 +1,3 @@
package schema package schema
const ProviderVersion = "1.39.0" const ProviderVersion = "1.40.0"

View File

@ -0,0 +1,36 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
package schema
type DataSourceExternalLocationExternalLocationInfoEncryptionDetailsSseEncryptionDetails struct {
Algorithm string `json:"algorithm,omitempty"`
AwsKmsKeyArn string `json:"aws_kms_key_arn,omitempty"`
}
type DataSourceExternalLocationExternalLocationInfoEncryptionDetails struct {
SseEncryptionDetails *DataSourceExternalLocationExternalLocationInfoEncryptionDetailsSseEncryptionDetails `json:"sse_encryption_details,omitempty"`
}
type DataSourceExternalLocationExternalLocationInfo struct {
AccessPoint string `json:"access_point,omitempty"`
BrowseOnly bool `json:"browse_only,omitempty"`
Comment string `json:"comment,omitempty"`
CreatedAt int `json:"created_at,omitempty"`
CreatedBy string `json:"created_by,omitempty"`
CredentialId string `json:"credential_id,omitempty"`
CredentialName string `json:"credential_name,omitempty"`
MetastoreId string `json:"metastore_id,omitempty"`
Name string `json:"name,omitempty"`
Owner string `json:"owner,omitempty"`
ReadOnly bool `json:"read_only,omitempty"`
UpdatedAt int `json:"updated_at,omitempty"`
UpdatedBy string `json:"updated_by,omitempty"`
Url string `json:"url,omitempty"`
EncryptionDetails *DataSourceExternalLocationExternalLocationInfoEncryptionDetails `json:"encryption_details,omitempty"`
}
type DataSourceExternalLocation struct {
Id string `json:"id,omitempty"`
Name string `json:"name"`
ExternalLocationInfo *DataSourceExternalLocationExternalLocationInfo `json:"external_location_info,omitempty"`
}

View File

@ -0,0 +1,8 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
package schema
type DataSourceExternalLocations struct {
Id string `json:"id,omitempty"`
Names []string `json:"names,omitempty"`
}

View File

@ -27,6 +27,7 @@ type DataSourceInstancePoolPoolInfoDiskSpec struct {
type DataSourceInstancePoolPoolInfoGcpAttributes struct { type DataSourceInstancePoolPoolInfoGcpAttributes struct {
GcpAvailability string `json:"gcp_availability,omitempty"` GcpAvailability string `json:"gcp_availability,omitempty"`
LocalSsdCount int `json:"local_ssd_count,omitempty"` LocalSsdCount int `json:"local_ssd_count,omitempty"`
ZoneId string `json:"zone_id,omitempty"`
} }
type DataSourceInstancePoolPoolInfoInstancePoolFleetAttributesFleetOnDemandOption struct { type DataSourceInstancePoolPoolInfoInstancePoolFleetAttributesFleetOnDemandOption struct {

View File

@ -2,15 +2,6 @@
package schema package schema
type DataSourceJobJobSettingsSettingsComputeSpec struct {
Kind string `json:"kind,omitempty"`
}
type DataSourceJobJobSettingsSettingsCompute struct {
ComputeKey string `json:"compute_key,omitempty"`
Spec *DataSourceJobJobSettingsSettingsComputeSpec `json:"spec,omitempty"`
}
type DataSourceJobJobSettingsSettingsContinuous struct { type DataSourceJobJobSettingsSettingsContinuous struct {
PauseStatus string `json:"pause_status,omitempty"` PauseStatus string `json:"pause_status,omitempty"`
} }
@ -38,6 +29,16 @@ type DataSourceJobJobSettingsSettingsEmailNotifications struct {
OnSuccess []string `json:"on_success,omitempty"` OnSuccess []string `json:"on_success,omitempty"`
} }
type DataSourceJobJobSettingsSettingsEnvironmentSpec struct {
Client string `json:"client"`
Dependencies []string `json:"dependencies,omitempty"`
}
type DataSourceJobJobSettingsSettingsEnvironment struct {
EnvironmentKey string `json:"environment_key"`
Spec *DataSourceJobJobSettingsSettingsEnvironmentSpec `json:"spec,omitempty"`
}
type DataSourceJobJobSettingsSettingsGitSourceJobSource struct { type DataSourceJobJobSettingsSettingsGitSourceJobSource struct {
DirtyState string `json:"dirty_state,omitempty"` DirtyState string `json:"dirty_state,omitempty"`
ImportFromGitBranch string `json:"import_from_git_branch"` ImportFromGitBranch string `json:"import_from_git_branch"`
@ -411,6 +412,7 @@ type DataSourceJobJobSettingsSettingsNotebookTask struct {
BaseParameters map[string]string `json:"base_parameters,omitempty"` BaseParameters map[string]string `json:"base_parameters,omitempty"`
NotebookPath string `json:"notebook_path"` NotebookPath string `json:"notebook_path"`
Source string `json:"source,omitempty"` Source string `json:"source,omitempty"`
WarehouseId string `json:"warehouse_id,omitempty"`
} }
type DataSourceJobJobSettingsSettingsNotificationSettings struct { type DataSourceJobJobSettingsSettingsNotificationSettings struct {
@ -725,6 +727,7 @@ type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNotebookTask struct {
BaseParameters map[string]string `json:"base_parameters,omitempty"` BaseParameters map[string]string `json:"base_parameters,omitempty"`
NotebookPath string `json:"notebook_path"` NotebookPath string `json:"notebook_path"`
Source string `json:"source,omitempty"` Source string `json:"source,omitempty"`
WarehouseId string `json:"warehouse_id,omitempty"`
} }
type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNotificationSettings struct { type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNotificationSettings struct {
@ -831,8 +834,8 @@ type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications str
} }
type DataSourceJobJobSettingsSettingsTaskForEachTaskTask struct { type DataSourceJobJobSettingsSettingsTaskForEachTaskTask struct {
ComputeKey string `json:"compute_key,omitempty"`
Description string `json:"description,omitempty"` Description string `json:"description,omitempty"`
EnvironmentKey string `json:"environment_key,omitempty"`
ExistingClusterId string `json:"existing_cluster_id,omitempty"` ExistingClusterId string `json:"existing_cluster_id,omitempty"`
JobClusterKey string `json:"job_cluster_key,omitempty"` JobClusterKey string `json:"job_cluster_key,omitempty"`
MaxRetries int `json:"max_retries,omitempty"` MaxRetries int `json:"max_retries,omitempty"`
@ -1062,6 +1065,7 @@ type DataSourceJobJobSettingsSettingsTaskNotebookTask struct {
BaseParameters map[string]string `json:"base_parameters,omitempty"` BaseParameters map[string]string `json:"base_parameters,omitempty"`
NotebookPath string `json:"notebook_path"` NotebookPath string `json:"notebook_path"`
Source string `json:"source,omitempty"` Source string `json:"source,omitempty"`
WarehouseId string `json:"warehouse_id,omitempty"`
} }
type DataSourceJobJobSettingsSettingsTaskNotificationSettings struct { type DataSourceJobJobSettingsSettingsTaskNotificationSettings struct {
@ -1168,8 +1172,8 @@ type DataSourceJobJobSettingsSettingsTaskWebhookNotifications struct {
} }
type DataSourceJobJobSettingsSettingsTask struct { type DataSourceJobJobSettingsSettingsTask struct {
ComputeKey string `json:"compute_key,omitempty"`
Description string `json:"description,omitempty"` Description string `json:"description,omitempty"`
EnvironmentKey string `json:"environment_key,omitempty"`
ExistingClusterId string `json:"existing_cluster_id,omitempty"` ExistingClusterId string `json:"existing_cluster_id,omitempty"`
JobClusterKey string `json:"job_cluster_key,omitempty"` JobClusterKey string `json:"job_cluster_key,omitempty"`
MaxRetries int `json:"max_retries,omitempty"` MaxRetries int `json:"max_retries,omitempty"`
@ -1252,11 +1256,11 @@ type DataSourceJobJobSettingsSettings struct {
RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` RetryOnTimeout bool `json:"retry_on_timeout,omitempty"`
Tags map[string]string `json:"tags,omitempty"` Tags map[string]string `json:"tags,omitempty"`
TimeoutSeconds int `json:"timeout_seconds,omitempty"` TimeoutSeconds int `json:"timeout_seconds,omitempty"`
Compute []DataSourceJobJobSettingsSettingsCompute `json:"compute,omitempty"`
Continuous *DataSourceJobJobSettingsSettingsContinuous `json:"continuous,omitempty"` Continuous *DataSourceJobJobSettingsSettingsContinuous `json:"continuous,omitempty"`
DbtTask *DataSourceJobJobSettingsSettingsDbtTask `json:"dbt_task,omitempty"` DbtTask *DataSourceJobJobSettingsSettingsDbtTask `json:"dbt_task,omitempty"`
Deployment *DataSourceJobJobSettingsSettingsDeployment `json:"deployment,omitempty"` Deployment *DataSourceJobJobSettingsSettingsDeployment `json:"deployment,omitempty"`
EmailNotifications *DataSourceJobJobSettingsSettingsEmailNotifications `json:"email_notifications,omitempty"` EmailNotifications *DataSourceJobJobSettingsSettingsEmailNotifications `json:"email_notifications,omitempty"`
Environment []DataSourceJobJobSettingsSettingsEnvironment `json:"environment,omitempty"`
GitSource *DataSourceJobJobSettingsSettingsGitSource `json:"git_source,omitempty"` GitSource *DataSourceJobJobSettingsSettingsGitSource `json:"git_source,omitempty"`
Health *DataSourceJobJobSettingsSettingsHealth `json:"health,omitempty"` Health *DataSourceJobJobSettingsSettingsHealth `json:"health,omitempty"`
JobCluster []DataSourceJobJobSettingsSettingsJobCluster `json:"job_cluster,omitempty"` JobCluster []DataSourceJobJobSettingsSettingsJobCluster `json:"job_cluster,omitempty"`

View File

@ -25,6 +25,8 @@ type DataSourceMetastoreMetastoreInfo struct {
type DataSourceMetastore struct { type DataSourceMetastore struct {
Id string `json:"id,omitempty"` Id string `json:"id,omitempty"`
MetastoreId string `json:"metastore_id"` MetastoreId string `json:"metastore_id,omitempty"`
Name string `json:"name,omitempty"`
Region string `json:"region,omitempty"`
MetastoreInfo *DataSourceMetastoreMetastoreInfo `json:"metastore_info,omitempty"` MetastoreInfo *DataSourceMetastoreMetastoreInfo `json:"metastore_info,omitempty"`
} }

View File

@ -17,6 +17,8 @@ type DataSources struct {
DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"` DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"`
DbfsFilePaths map[string]any `json:"databricks_dbfs_file_paths,omitempty"` DbfsFilePaths map[string]any `json:"databricks_dbfs_file_paths,omitempty"`
Directory map[string]any `json:"databricks_directory,omitempty"` Directory map[string]any `json:"databricks_directory,omitempty"`
ExternalLocation map[string]any `json:"databricks_external_location,omitempty"`
ExternalLocations map[string]any `json:"databricks_external_locations,omitempty"`
Group map[string]any `json:"databricks_group,omitempty"` Group map[string]any `json:"databricks_group,omitempty"`
InstancePool map[string]any `json:"databricks_instance_pool,omitempty"` InstancePool map[string]any `json:"databricks_instance_pool,omitempty"`
InstanceProfiles map[string]any `json:"databricks_instance_profiles,omitempty"` InstanceProfiles map[string]any `json:"databricks_instance_profiles,omitempty"`
@ -64,6 +66,8 @@ func NewDataSources() *DataSources {
DbfsFile: make(map[string]any), DbfsFile: make(map[string]any),
DbfsFilePaths: make(map[string]any), DbfsFilePaths: make(map[string]any),
Directory: make(map[string]any), Directory: make(map[string]any),
ExternalLocation: make(map[string]any),
ExternalLocations: make(map[string]any),
Group: make(map[string]any), Group: make(map[string]any),
InstancePool: make(map[string]any), InstancePool: make(map[string]any),
InstanceProfiles: make(map[string]any), InstanceProfiles: make(map[string]any),

View File

@ -3,17 +3,18 @@
package schema package schema
type ResourceCatalog struct { type ResourceCatalog struct {
Comment string `json:"comment,omitempty"` Comment string `json:"comment,omitempty"`
ConnectionName string `json:"connection_name,omitempty"` ConnectionName string `json:"connection_name,omitempty"`
ForceDestroy bool `json:"force_destroy,omitempty"` EnablePredictiveOptimization string `json:"enable_predictive_optimization,omitempty"`
Id string `json:"id,omitempty"` ForceDestroy bool `json:"force_destroy,omitempty"`
IsolationMode string `json:"isolation_mode,omitempty"` Id string `json:"id,omitempty"`
MetastoreId string `json:"metastore_id,omitempty"` IsolationMode string `json:"isolation_mode,omitempty"`
Name string `json:"name"` MetastoreId string `json:"metastore_id,omitempty"`
Options map[string]string `json:"options,omitempty"` Name string `json:"name"`
Owner string `json:"owner,omitempty"` Options map[string]string `json:"options,omitempty"`
Properties map[string]string `json:"properties,omitempty"` Owner string `json:"owner,omitempty"`
ProviderName string `json:"provider_name,omitempty"` Properties map[string]string `json:"properties,omitempty"`
ShareName string `json:"share_name,omitempty"` ProviderName string `json:"provider_name,omitempty"`
StorageRoot string `json:"storage_root,omitempty"` ShareName string `json:"share_name,omitempty"`
StorageRoot string `json:"storage_root,omitempty"`
} }

View File

@ -32,6 +32,10 @@ type ResourceClusterAzureAttributes struct {
LogAnalyticsInfo *ResourceClusterAzureAttributesLogAnalyticsInfo `json:"log_analytics_info,omitempty"` LogAnalyticsInfo *ResourceClusterAzureAttributesLogAnalyticsInfo `json:"log_analytics_info,omitempty"`
} }
type ResourceClusterCloneFrom struct {
SourceClusterId string `json:"source_cluster_id"`
}
type ResourceClusterClusterLogConfDbfs struct { type ResourceClusterClusterLogConfDbfs struct {
Destination string `json:"destination"` Destination string `json:"destination"`
} }
@ -190,6 +194,7 @@ type ResourceCluster struct {
Autoscale *ResourceClusterAutoscale `json:"autoscale,omitempty"` Autoscale *ResourceClusterAutoscale `json:"autoscale,omitempty"`
AwsAttributes *ResourceClusterAwsAttributes `json:"aws_attributes,omitempty"` AwsAttributes *ResourceClusterAwsAttributes `json:"aws_attributes,omitempty"`
AzureAttributes *ResourceClusterAzureAttributes `json:"azure_attributes,omitempty"` AzureAttributes *ResourceClusterAzureAttributes `json:"azure_attributes,omitempty"`
CloneFrom *ResourceClusterCloneFrom `json:"clone_from,omitempty"`
ClusterLogConf *ResourceClusterClusterLogConf `json:"cluster_log_conf,omitempty"` ClusterLogConf *ResourceClusterClusterLogConf `json:"cluster_log_conf,omitempty"`
ClusterMountInfo []ResourceClusterClusterMountInfo `json:"cluster_mount_info,omitempty"` ClusterMountInfo []ResourceClusterClusterMountInfo `json:"cluster_mount_info,omitempty"`
DockerImage *ResourceClusterDockerImage `json:"docker_image,omitempty"` DockerImage *ResourceClusterDockerImage `json:"docker_image,omitempty"`

View File

@ -27,6 +27,7 @@ type ResourceInstancePoolDiskSpec struct {
type ResourceInstancePoolGcpAttributes struct { type ResourceInstancePoolGcpAttributes struct {
GcpAvailability string `json:"gcp_availability,omitempty"` GcpAvailability string `json:"gcp_availability,omitempty"`
LocalSsdCount int `json:"local_ssd_count,omitempty"` LocalSsdCount int `json:"local_ssd_count,omitempty"`
ZoneId string `json:"zone_id,omitempty"`
} }
type ResourceInstancePoolInstancePoolFleetAttributesFleetOnDemandOption struct { type ResourceInstancePoolInstancePoolFleetAttributesFleetOnDemandOption struct {

View File

@ -2,15 +2,6 @@
package schema package schema
type ResourceJobComputeSpec struct {
Kind string `json:"kind,omitempty"`
}
type ResourceJobCompute struct {
ComputeKey string `json:"compute_key,omitempty"`
Spec *ResourceJobComputeSpec `json:"spec,omitempty"`
}
type ResourceJobContinuous struct { type ResourceJobContinuous struct {
PauseStatus string `json:"pause_status,omitempty"` PauseStatus string `json:"pause_status,omitempty"`
} }
@ -38,6 +29,16 @@ type ResourceJobEmailNotifications struct {
OnSuccess []string `json:"on_success,omitempty"` OnSuccess []string `json:"on_success,omitempty"`
} }
type ResourceJobEnvironmentSpec struct {
Client string `json:"client"`
Dependencies []string `json:"dependencies,omitempty"`
}
type ResourceJobEnvironment struct {
EnvironmentKey string `json:"environment_key"`
Spec *ResourceJobEnvironmentSpec `json:"spec,omitempty"`
}
type ResourceJobGitSourceJobSource struct { type ResourceJobGitSourceJobSource struct {
DirtyState string `json:"dirty_state,omitempty"` DirtyState string `json:"dirty_state,omitempty"`
ImportFromGitBranch string `json:"import_from_git_branch"` ImportFromGitBranch string `json:"import_from_git_branch"`
@ -411,6 +412,7 @@ type ResourceJobNotebookTask struct {
BaseParameters map[string]string `json:"base_parameters,omitempty"` BaseParameters map[string]string `json:"base_parameters,omitempty"`
NotebookPath string `json:"notebook_path"` NotebookPath string `json:"notebook_path"`
Source string `json:"source,omitempty"` Source string `json:"source,omitempty"`
WarehouseId string `json:"warehouse_id,omitempty"`
} }
type ResourceJobNotificationSettings struct { type ResourceJobNotificationSettings struct {
@ -725,6 +727,7 @@ type ResourceJobTaskForEachTaskTaskNotebookTask struct {
BaseParameters map[string]string `json:"base_parameters,omitempty"` BaseParameters map[string]string `json:"base_parameters,omitempty"`
NotebookPath string `json:"notebook_path"` NotebookPath string `json:"notebook_path"`
Source string `json:"source,omitempty"` Source string `json:"source,omitempty"`
WarehouseId string `json:"warehouse_id,omitempty"`
} }
type ResourceJobTaskForEachTaskTaskNotificationSettings struct { type ResourceJobTaskForEachTaskTaskNotificationSettings struct {
@ -831,8 +834,8 @@ type ResourceJobTaskForEachTaskTaskWebhookNotifications struct {
} }
type ResourceJobTaskForEachTaskTask struct { type ResourceJobTaskForEachTaskTask struct {
ComputeKey string `json:"compute_key,omitempty"`
Description string `json:"description,omitempty"` Description string `json:"description,omitempty"`
EnvironmentKey string `json:"environment_key,omitempty"`
ExistingClusterId string `json:"existing_cluster_id,omitempty"` ExistingClusterId string `json:"existing_cluster_id,omitempty"`
JobClusterKey string `json:"job_cluster_key,omitempty"` JobClusterKey string `json:"job_cluster_key,omitempty"`
MaxRetries int `json:"max_retries,omitempty"` MaxRetries int `json:"max_retries,omitempty"`
@ -1062,6 +1065,7 @@ type ResourceJobTaskNotebookTask struct {
BaseParameters map[string]string `json:"base_parameters,omitempty"` BaseParameters map[string]string `json:"base_parameters,omitempty"`
NotebookPath string `json:"notebook_path"` NotebookPath string `json:"notebook_path"`
Source string `json:"source,omitempty"` Source string `json:"source,omitempty"`
WarehouseId string `json:"warehouse_id,omitempty"`
} }
type ResourceJobTaskNotificationSettings struct { type ResourceJobTaskNotificationSettings struct {
@ -1168,8 +1172,8 @@ type ResourceJobTaskWebhookNotifications struct {
} }
type ResourceJobTask struct { type ResourceJobTask struct {
ComputeKey string `json:"compute_key,omitempty"`
Description string `json:"description,omitempty"` Description string `json:"description,omitempty"`
EnvironmentKey string `json:"environment_key,omitempty"`
ExistingClusterId string `json:"existing_cluster_id,omitempty"` ExistingClusterId string `json:"existing_cluster_id,omitempty"`
JobClusterKey string `json:"job_cluster_key,omitempty"` JobClusterKey string `json:"job_cluster_key,omitempty"`
MaxRetries int `json:"max_retries,omitempty"` MaxRetries int `json:"max_retries,omitempty"`
@ -1256,11 +1260,11 @@ type ResourceJob struct {
Tags map[string]string `json:"tags,omitempty"` Tags map[string]string `json:"tags,omitempty"`
TimeoutSeconds int `json:"timeout_seconds,omitempty"` TimeoutSeconds int `json:"timeout_seconds,omitempty"`
Url string `json:"url,omitempty"` Url string `json:"url,omitempty"`
Compute []ResourceJobCompute `json:"compute,omitempty"`
Continuous *ResourceJobContinuous `json:"continuous,omitempty"` Continuous *ResourceJobContinuous `json:"continuous,omitempty"`
DbtTask *ResourceJobDbtTask `json:"dbt_task,omitempty"` DbtTask *ResourceJobDbtTask `json:"dbt_task,omitempty"`
Deployment *ResourceJobDeployment `json:"deployment,omitempty"` Deployment *ResourceJobDeployment `json:"deployment,omitempty"`
EmailNotifications *ResourceJobEmailNotifications `json:"email_notifications,omitempty"` EmailNotifications *ResourceJobEmailNotifications `json:"email_notifications,omitempty"`
Environment []ResourceJobEnvironment `json:"environment,omitempty"`
GitSource *ResourceJobGitSource `json:"git_source,omitempty"` GitSource *ResourceJobGitSource `json:"git_source,omitempty"`
Health *ResourceJobHealth `json:"health,omitempty"` Health *ResourceJobHealth `json:"health,omitempty"`
JobCluster []ResourceJobJobCluster `json:"job_cluster,omitempty"` JobCluster []ResourceJobJobCluster `json:"job_cluster,omitempty"`

View File

@ -3,11 +3,11 @@
package schema package schema
type ResourceLakehouseMonitorCustomMetrics struct { type ResourceLakehouseMonitorCustomMetrics struct {
Definition string `json:"definition,omitempty"` Definition string `json:"definition"`
InputColumns []string `json:"input_columns,omitempty"` InputColumns []string `json:"input_columns"`
Name string `json:"name,omitempty"` Name string `json:"name"`
OutputDataType string `json:"output_data_type,omitempty"` OutputDataType string `json:"output_data_type"`
Type string `json:"type,omitempty"` Type string `json:"type"`
} }
type ResourceLakehouseMonitorDataClassificationConfig struct { type ResourceLakehouseMonitorDataClassificationConfig struct {
@ -15,35 +15,40 @@ type ResourceLakehouseMonitorDataClassificationConfig struct {
} }
type ResourceLakehouseMonitorInferenceLog struct { type ResourceLakehouseMonitorInferenceLog struct {
Granularities []string `json:"granularities,omitempty"` Granularities []string `json:"granularities"`
LabelCol string `json:"label_col,omitempty"` LabelCol string `json:"label_col,omitempty"`
ModelIdCol string `json:"model_id_col,omitempty"` ModelIdCol string `json:"model_id_col"`
PredictionCol string `json:"prediction_col,omitempty"` PredictionCol string `json:"prediction_col"`
PredictionProbaCol string `json:"prediction_proba_col,omitempty"` PredictionProbaCol string `json:"prediction_proba_col,omitempty"`
ProblemType string `json:"problem_type,omitempty"` ProblemType string `json:"problem_type"`
TimestampCol string `json:"timestamp_col,omitempty"` TimestampCol string `json:"timestamp_col"`
} }
type ResourceLakehouseMonitorNotificationsOnFailure struct { type ResourceLakehouseMonitorNotificationsOnFailure struct {
EmailAddresses []string `json:"email_addresses,omitempty"` EmailAddresses []string `json:"email_addresses,omitempty"`
} }
type ResourceLakehouseMonitorNotificationsOnNewClassificationTagDetected struct {
EmailAddresses []string `json:"email_addresses,omitempty"`
}
type ResourceLakehouseMonitorNotifications struct { type ResourceLakehouseMonitorNotifications struct {
OnFailure *ResourceLakehouseMonitorNotificationsOnFailure `json:"on_failure,omitempty"` OnFailure *ResourceLakehouseMonitorNotificationsOnFailure `json:"on_failure,omitempty"`
OnNewClassificationTagDetected *ResourceLakehouseMonitorNotificationsOnNewClassificationTagDetected `json:"on_new_classification_tag_detected,omitempty"`
} }
type ResourceLakehouseMonitorSchedule struct { type ResourceLakehouseMonitorSchedule struct {
PauseStatus string `json:"pause_status,omitempty"` PauseStatus string `json:"pause_status,omitempty"`
QuartzCronExpression string `json:"quartz_cron_expression,omitempty"` QuartzCronExpression string `json:"quartz_cron_expression"`
TimezoneId string `json:"timezone_id,omitempty"` TimezoneId string `json:"timezone_id"`
} }
type ResourceLakehouseMonitorSnapshot struct { type ResourceLakehouseMonitorSnapshot struct {
} }
type ResourceLakehouseMonitorTimeSeries struct { type ResourceLakehouseMonitorTimeSeries struct {
Granularities []string `json:"granularities,omitempty"` Granularities []string `json:"granularities"`
TimestampCol string `json:"timestamp_col,omitempty"` TimestampCol string `json:"timestamp_col"`
} }
type ResourceLakehouseMonitor struct { type ResourceLakehouseMonitor struct {

View File

@ -117,6 +117,11 @@ type ResourcePipelineCluster struct {
InitScripts []ResourcePipelineClusterInitScripts `json:"init_scripts,omitempty"` InitScripts []ResourcePipelineClusterInitScripts `json:"init_scripts,omitempty"`
} }
type ResourcePipelineDeployment struct {
Kind string `json:"kind,omitempty"`
MetadataFilePath string `json:"metadata_file_path,omitempty"`
}
type ResourcePipelineFilters struct { type ResourcePipelineFilters struct {
Exclude []string `json:"exclude,omitempty"` Exclude []string `json:"exclude,omitempty"`
Include []string `json:"include,omitempty"` Include []string `json:"include,omitempty"`
@ -165,6 +170,7 @@ type ResourcePipeline struct {
Target string `json:"target,omitempty"` Target string `json:"target,omitempty"`
Url string `json:"url,omitempty"` Url string `json:"url,omitempty"`
Cluster []ResourcePipelineCluster `json:"cluster,omitempty"` Cluster []ResourcePipelineCluster `json:"cluster,omitempty"`
Deployment *ResourcePipelineDeployment `json:"deployment,omitempty"`
Filters *ResourcePipelineFilters `json:"filters,omitempty"` Filters *ResourcePipelineFilters `json:"filters,omitempty"`
Library []ResourcePipelineLibrary `json:"library,omitempty"` Library []ResourcePipelineLibrary `json:"library,omitempty"`
Notification []ResourcePipelineNotification `json:"notification,omitempty"` Notification []ResourcePipelineNotification `json:"notification,omitempty"`

View File

@ -3,7 +3,11 @@
package schema package schema
type ResourceRecipientIpAccessList struct { type ResourceRecipientIpAccessList struct {
AllowedIpAddresses []string `json:"allowed_ip_addresses"` AllowedIpAddresses []string `json:"allowed_ip_addresses,omitempty"`
}
type ResourceRecipientPropertiesKvpairs struct {
Properties map[string]string `json:"properties"`
} }
type ResourceRecipientTokens struct { type ResourceRecipientTokens struct {
@ -17,13 +21,23 @@ type ResourceRecipientTokens struct {
} }
type ResourceRecipient struct { type ResourceRecipient struct {
AuthenticationType string `json:"authentication_type"` Activated bool `json:"activated,omitempty"`
Comment string `json:"comment,omitempty"` ActivationUrl string `json:"activation_url,omitempty"`
DataRecipientGlobalMetastoreId string `json:"data_recipient_global_metastore_id,omitempty"` AuthenticationType string `json:"authentication_type"`
Id string `json:"id,omitempty"` Cloud string `json:"cloud,omitempty"`
Name string `json:"name"` Comment string `json:"comment,omitempty"`
Owner string `json:"owner,omitempty"` CreatedAt int `json:"created_at,omitempty"`
SharingCode string `json:"sharing_code,omitempty"` CreatedBy string `json:"created_by,omitempty"`
IpAccessList *ResourceRecipientIpAccessList `json:"ip_access_list,omitempty"` DataRecipientGlobalMetastoreId string `json:"data_recipient_global_metastore_id,omitempty"`
Tokens []ResourceRecipientTokens `json:"tokens,omitempty"` Id string `json:"id,omitempty"`
MetastoreId string `json:"metastore_id,omitempty"`
Name string `json:"name"`
Owner string `json:"owner,omitempty"`
Region string `json:"region,omitempty"`
SharingCode string `json:"sharing_code,omitempty"`
UpdatedAt int `json:"updated_at,omitempty"`
UpdatedBy string `json:"updated_by,omitempty"`
IpAccessList *ResourceRecipientIpAccessList `json:"ip_access_list,omitempty"`
PropertiesKvpairs *ResourceRecipientPropertiesKvpairs `json:"properties_kvpairs,omitempty"`
Tokens []ResourceRecipientTokens `json:"tokens,omitempty"`
} }

View File

@ -3,13 +3,14 @@
package schema package schema
type ResourceSchema struct { type ResourceSchema struct {
CatalogName string `json:"catalog_name"` CatalogName string `json:"catalog_name"`
Comment string `json:"comment,omitempty"` Comment string `json:"comment,omitempty"`
ForceDestroy bool `json:"force_destroy,omitempty"` EnablePredictiveOptimization string `json:"enable_predictive_optimization,omitempty"`
Id string `json:"id,omitempty"` ForceDestroy bool `json:"force_destroy,omitempty"`
MetastoreId string `json:"metastore_id,omitempty"` Id string `json:"id,omitempty"`
Name string `json:"name"` MetastoreId string `json:"metastore_id,omitempty"`
Owner string `json:"owner,omitempty"` Name string `json:"name"`
Properties map[string]string `json:"properties,omitempty"` Owner string `json:"owner,omitempty"`
StorageRoot string `json:"storage_root,omitempty"` Properties map[string]string `json:"properties,omitempty"`
StorageRoot string `json:"storage_root,omitempty"`
} }

View File

@ -21,7 +21,7 @@ type Root struct {
const ProviderHost = "registry.terraform.io" const ProviderHost = "registry.terraform.io"
const ProviderSource = "databricks/databricks" const ProviderSource = "databricks/databricks"
const ProviderVersion = "1.39.0" const ProviderVersion = "1.40.0"
func NewRoot() *Root { func NewRoot() *Root {
return &Root{ return &Root{

View File

@ -1,45 +1,71 @@
package libraries package libraries
import ( import (
"context"
"fmt"
"path/filepath"
"github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/jobs"
) )
func findAllTasks(b *bundle.Bundle) []*jobs.Task { func findAllTasks(b *bundle.Bundle) map[string]([]jobs.Task) {
r := b.Config.Resources r := b.Config.Resources
result := make([]*jobs.Task, 0) result := make(map[string]([]jobs.Task), 0)
for k := range b.Config.Resources.Jobs { for k := range b.Config.Resources.Jobs {
tasks := r.Jobs[k].JobSettings.Tasks result[k] = append(result[k], r.Jobs[k].JobSettings.Tasks...)
for i := range tasks {
task := &tasks[i]
result = append(result, task)
}
} }
return result return result
} }
func FindAllEnvironments(b *bundle.Bundle) map[string]([]jobs.JobEnvironment) {
jobEnvs := make(map[string]([]jobs.JobEnvironment), 0)
for jobKey, job := range b.Config.Resources.Jobs {
if len(job.Environments) == 0 {
continue
}
jobEnvs[jobKey] = job.Environments
}
return jobEnvs
}
func isEnvsWithLocalLibraries(envs []jobs.JobEnvironment) bool {
for _, e := range envs {
for _, l := range e.Spec.Dependencies {
if IsEnvironmentDependencyLocal(l) {
return true
}
}
}
return false
}
func FindAllWheelTasksWithLocalLibraries(b *bundle.Bundle) []*jobs.Task { func FindAllWheelTasksWithLocalLibraries(b *bundle.Bundle) []*jobs.Task {
tasks := findAllTasks(b) tasks := findAllTasks(b)
envs := FindAllEnvironments(b)
wheelTasks := make([]*jobs.Task, 0) wheelTasks := make([]*jobs.Task, 0)
for _, task := range tasks { for k, jobTasks := range tasks {
if task.PythonWheelTask != nil && IsTaskWithLocalLibraries(task) { for i := range jobTasks {
wheelTasks = append(wheelTasks, task) task := &jobTasks[i]
if task.PythonWheelTask == nil {
continue
}
if isTaskWithLocalLibraries(*task) {
wheelTasks = append(wheelTasks, task)
}
if envs[k] != nil && isEnvsWithLocalLibraries(envs[k]) {
wheelTasks = append(wheelTasks, task)
}
} }
} }
return wheelTasks return wheelTasks
} }
func IsTaskWithLocalLibraries(task *jobs.Task) bool { func isTaskWithLocalLibraries(task jobs.Task) bool {
for _, l := range task.Libraries { for _, l := range task.Libraries {
if IsLocalLibrary(&l) { if IsLocalLibrary(&l) {
return true return true
@ -49,7 +75,7 @@ func IsTaskWithLocalLibraries(task *jobs.Task) bool {
return false return false
} }
func IsTaskWithWorkspaceLibraries(task *jobs.Task) bool { func IsTaskWithWorkspaceLibraries(task jobs.Task) bool {
for _, l := range task.Libraries { for _, l := range task.Libraries {
if IsWorkspaceLibrary(&l) { if IsWorkspaceLibrary(&l) {
return true return true
@ -58,73 +84,3 @@ func IsTaskWithWorkspaceLibraries(task *jobs.Task) bool {
return false return false
} }
func findLibraryMatches(lib *compute.Library, b *bundle.Bundle) ([]string, error) {
path := libraryPath(lib)
if path == "" {
return nil, nil
}
fullPath := filepath.Join(b.RootPath, path)
return filepath.Glob(fullPath)
}
func findArtifactFiles(ctx context.Context, lib *compute.Library, b *bundle.Bundle) ([]*config.ArtifactFile, error) {
matches, err := findLibraryMatches(lib, b)
if err != nil {
return nil, err
}
if len(matches) == 0 && IsLocalLibrary(lib) {
return nil, fmt.Errorf("file %s is referenced in libraries section but doesn't exist on the local file system", libraryPath(lib))
}
var out []*config.ArtifactFile
for _, match := range matches {
af, err := findArtifactFileByLocalPath(match, b)
if err != nil {
cmdio.LogString(ctx, fmt.Sprintf("%s. Skipping uploading. In order to use the define 'artifacts' section", err.Error()))
} else {
out = append(out, af)
}
}
return out, nil
}
func findArtifactFileByLocalPath(path string, b *bundle.Bundle) (*config.ArtifactFile, error) {
for _, a := range b.Config.Artifacts {
for k := range a.Files {
if a.Files[k].Source == path {
return &a.Files[k], nil
}
}
}
return nil, fmt.Errorf("artifact section is not defined for file at %s", path)
}
func MapFilesToTaskLibraries(ctx context.Context, b *bundle.Bundle) map[string][]*compute.Library {
tasks := findAllTasks(b)
out := make(map[string][]*compute.Library)
for _, task := range tasks {
for j := range task.Libraries {
lib := &task.Libraries[j]
if !IsLocalLibrary(lib) {
continue
}
matches, err := findLibraryMatches(lib, b)
if err != nil {
log.Warnf(ctx, "Error matching library to files: %s", err.Error())
continue
}
for _, match := range matches {
out[match] = append(out[match], lib)
}
}
}
return out
}

View File

@ -1,88 +0,0 @@
package libraries
import (
"context"
"path/filepath"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/stretchr/testify/assert"
)
func TestMapFilesToTaskLibrariesNoGlob(t *testing.T) {
b := &bundle.Bundle{
RootPath: "testdata",
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job1": {
JobSettings: &jobs.JobSettings{
Tasks: []jobs.Task{
{
Libraries: []compute.Library{
{
Whl: "library1",
},
{
Whl: "library2",
},
{
Whl: "/absolute/path/in/workspace/library3",
},
},
},
{
Libraries: []compute.Library{
{
Whl: "library1",
},
{
Whl: "library2",
},
},
},
},
},
},
"job2": {
JobSettings: &jobs.JobSettings{
Tasks: []jobs.Task{
{
Libraries: []compute.Library{
{
Whl: "library1",
},
{
Whl: "library2",
},
},
},
},
},
},
},
},
},
}
out := MapFilesToTaskLibraries(context.Background(), b)
assert.Len(t, out, 2)
// Pointer equality for "library1"
assert.Equal(t, []*compute.Library{
&b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].Libraries[0],
&b.Config.Resources.Jobs["job1"].JobSettings.Tasks[1].Libraries[0],
&b.Config.Resources.Jobs["job2"].JobSettings.Tasks[0].Libraries[0],
}, out[filepath.Clean("testdata/library1")])
// Pointer equality for "library2"
assert.Equal(t, []*compute.Library{
&b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].Libraries[1],
&b.Config.Resources.Jobs["job1"].JobSettings.Tasks[1].Libraries[1],
&b.Config.Resources.Jobs["job2"].JobSettings.Tasks[0].Libraries[1],
}, out[filepath.Clean("testdata/library2")])
}

View File

@ -38,6 +38,25 @@ func IsLocalPath(p string) bool {
return !path.IsAbs(p) return !path.IsAbs(p)
} }
// IsEnvironmentDependencyLocal returns true if the specified dependency
// should be interpreted as a local path.
// We use this to check if a dependency in an environment spec is local.
// We can't use IsLocalPath because environment dependencies can be
// a PyPI package name, which would be misinterpreted as a local path by IsLocalPath.
func IsEnvironmentDependencyLocal(dep string) bool {
possiblePrefixes := []string{
".",
}
for _, prefix := range possiblePrefixes {
if strings.HasPrefix(dep, prefix) {
return true
}
}
return false
}
func isRemoteStorageScheme(path string) bool { func isRemoteStorageScheme(path string) bool {
url, err := url.Parse(path) url, err := url.Parse(path)
if err != nil { if err != nil {

View File

@ -5,6 +5,7 @@ import (
"github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/compute"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
func TestIsLocalPath(t *testing.T) { func TestIsLocalPath(t *testing.T) {
@ -41,3 +42,31 @@ func TestIsLocalLibrary(t *testing.T) {
// Empty. // Empty.
assert.False(t, IsLocalLibrary(&compute.Library{})) assert.False(t, IsLocalLibrary(&compute.Library{}))
} }
func TestIsEnvironmentDependencyLocal(t *testing.T) {
testCases := [](struct {
path string
expected bool
}){
{path: "./local/*.whl", expected: true},
{path: ".\\local\\*.whl", expected: true},
{path: "./local/mypath.whl", expected: true},
{path: ".\\local\\mypath.whl", expected: true},
{path: "../local/*.whl", expected: true},
{path: "..\\local\\*.whl", expected: true},
{path: "./../local/*.whl", expected: true},
{path: ".\\..\\local\\*.whl", expected: true},
{path: "../../local/*.whl", expected: true},
{path: "..\\..\\local\\*.whl", expected: true},
{path: "pypipackage", expected: false},
{path: "pypipackage/test.whl", expected: false},
{path: "pypipackage/*.whl", expected: false},
{path: "/Volumes/catalog/schema/volume/path.whl", expected: false},
{path: "/Workspace/my_project/dist.whl", expected: false},
{path: "-r /Workspace/my_project/requirements.txt", expected: false},
}
for _, tc := range testCases {
require.Equal(t, IsEnvironmentDependencyLocal(tc.path), tc.expected)
}
}

View File

@ -2,44 +2,77 @@ package libraries
import ( import (
"context" "context"
"fmt"
"path/filepath"
"github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/diag"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/jobs"
) )
type match struct { type match struct {
} }
func MatchWithArtifacts() bundle.Mutator { func ValidateLocalLibrariesExist() bundle.Mutator {
return &match{} return &match{}
} }
func (a *match) Name() string { func (a *match) Name() string {
return "libraries.MatchWithArtifacts" return "libraries.ValidateLocalLibrariesExist"
} }
func (a *match) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { func (a *match) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
tasks := findAllTasks(b) for _, job := range b.Config.Resources.Jobs {
for _, task := range tasks { err := validateEnvironments(job.Environments, b)
if isMissingRequiredLibraries(task) { if err != nil {
return diag.Errorf("task '%s' is missing required libraries. Please include your package code in task libraries block", task.TaskKey) return diag.FromErr(err)
} }
for j := range task.Libraries {
lib := &task.Libraries[j] for _, task := range job.JobSettings.Tasks {
_, err := findArtifactFiles(ctx, lib, b) err := validateTaskLibraries(task.Libraries, b)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
} }
} }
return nil return nil
} }
func isMissingRequiredLibraries(task *jobs.Task) bool { func validateTaskLibraries(libs []compute.Library, b *bundle.Bundle) error {
if task.Libraries != nil { for _, lib := range libs {
return false path := libraryPath(&lib)
if path == "" || !IsLocalPath(path) {
continue
}
matches, err := filepath.Glob(filepath.Join(b.RootPath, path))
if err != nil {
return err
}
if len(matches) == 0 {
return fmt.Errorf("file %s is referenced in libraries section but doesn't exist on the local file system", libraryPath(&lib))
}
} }
return task.PythonWheelTask != nil || task.SparkJarTask != nil return nil
}
func validateEnvironments(envs []jobs.JobEnvironment, b *bundle.Bundle) error {
for _, env := range envs {
for _, dep := range env.Spec.Dependencies {
matches, err := filepath.Glob(filepath.Join(b.RootPath, dep))
if err != nil {
return err
}
if len(matches) == 0 && IsEnvironmentDependencyLocal(dep) {
return fmt.Errorf("file %s is referenced in environments section but doesn't exist on the local file system", dep)
}
}
}
return nil
} }

View File

@ -1 +1,148 @@
package libraries package libraries
import (
"context"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/internal/testutil"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/stretchr/testify/require"
)
func TestValidateEnvironments(t *testing.T) {
tmpDir := t.TempDir()
testutil.Touch(t, tmpDir, "wheel.whl")
b := &bundle.Bundle{
RootPath: tmpDir,
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job": {
JobSettings: &jobs.JobSettings{
Environments: []jobs.JobEnvironment{
{
Spec: &compute.Environment{
Dependencies: []string{
"./wheel.whl",
"simplejson",
"/Workspace/Users/foo@bar.com/artifacts/test.whl",
},
},
},
},
},
},
},
},
},
}
diags := bundle.Apply(context.Background(), b, ValidateLocalLibrariesExist())
require.Nil(t, diags)
}
func TestValidateEnvironmentsNoFile(t *testing.T) {
tmpDir := t.TempDir()
b := &bundle.Bundle{
RootPath: tmpDir,
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job": {
JobSettings: &jobs.JobSettings{
Environments: []jobs.JobEnvironment{
{
Spec: &compute.Environment{
Dependencies: []string{
"./wheel.whl",
"simplejson",
"/Workspace/Users/foo@bar.com/artifacts/test.whl",
},
},
},
},
},
},
},
},
},
}
diags := bundle.Apply(context.Background(), b, ValidateLocalLibrariesExist())
require.Len(t, diags, 1)
require.Equal(t, "file ./wheel.whl is referenced in environments section but doesn't exist on the local file system", diags[0].Summary)
}
func TestValidateTaskLibraries(t *testing.T) {
tmpDir := t.TempDir()
testutil.Touch(t, tmpDir, "wheel.whl")
b := &bundle.Bundle{
RootPath: tmpDir,
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job": {
JobSettings: &jobs.JobSettings{
Tasks: []jobs.Task{
{
Libraries: []compute.Library{
{
Whl: "./wheel.whl",
},
{
Whl: "/Workspace/Users/foo@bar.com/artifacts/test.whl",
},
},
},
},
},
},
},
},
},
}
diags := bundle.Apply(context.Background(), b, ValidateLocalLibrariesExist())
require.Nil(t, diags)
}
func TestValidateTaskLibrariesNoFile(t *testing.T) {
tmpDir := t.TempDir()
b := &bundle.Bundle{
RootPath: tmpDir,
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job": {
JobSettings: &jobs.JobSettings{
Tasks: []jobs.Task{
{
Libraries: []compute.Library{
{
Whl: "./wheel.whl",
},
{
Whl: "/Workspace/Users/foo@bar.com/artifacts/test.whl",
},
},
},
},
},
},
},
},
},
}
diags := bundle.Apply(context.Background(), b, ValidateLocalLibrariesExist())
require.Len(t, diags, 1)
require.Equal(t, "file ./wheel.whl is referenced in libraries section but doesn't exist on the local file system", diags[0].Summary)
}

View File

@ -0,0 +1,29 @@
package bundle
import (
"context"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/log"
)
// ReadOnlyMutator is the interface type that allows access to bundle configuration but does not allow any mutations.
type ReadOnlyMutator interface {
// Name returns the mutator's name.
Name() string
// Apply accesses the specified read-only bundle object.
Apply(context.Context, ReadOnlyBundle) diag.Diagnostics
}
func ApplyReadOnly(ctx context.Context, rb ReadOnlyBundle, m ReadOnlyMutator) diag.Diagnostics {
ctx = log.NewContext(ctx, log.GetLogger(ctx).With("mutator (read-only)", m.Name()))
log.Debugf(ctx, "ApplyReadOnly")
diags := m.Apply(ctx, rb)
if err := diags.Error(); err != nil {
log.Errorf(ctx, "Error: %s", err)
}
return diags
}
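For orientation, here is a minimal sketch of a read-only mutator. It assumes only the accessors visible elsewhere in this change (rb.Config(), rb.WorkspaceClient()); the countJobs type itself is illustrative and not part of the commit.

package bundle

import (
	"context"

	"github.com/databricks/cli/libs/diag"
)

// countJobs is an illustrative read-only mutator that only inspects configuration.
type countJobs struct {
	count *int
}

func (m *countJobs) Name() string {
	return "countJobs"
}

func (m *countJobs) Apply(ctx context.Context, rb ReadOnlyBundle) diag.Diagnostics {
	// Configuration can be read through rb, but not modified.
	*m.count = len(rb.Config().Resources.Jobs)
	return nil
}

It would be applied with ApplyReadOnly(ctx, ReadOnly(b), &countJobs{count: &n}), mirroring the bundle.ReadOnly(b) call sites in GetSync and GetSyncOptions earlier in this change.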

43
bundle/parallel.go Normal file
View File

@ -0,0 +1,43 @@
package bundle
import (
"context"
"sync"
"github.com/databricks/cli/libs/diag"
)
type parallel struct {
mutators []ReadOnlyMutator
}
func (m *parallel) Name() string {
return "parallel"
}
func (m *parallel) Apply(ctx context.Context, rb ReadOnlyBundle) diag.Diagnostics {
var wg sync.WaitGroup
var mu sync.Mutex
var diags diag.Diagnostics
wg.Add(len(m.mutators))
for _, mutator := range m.mutators {
go func(mutator ReadOnlyMutator) {
defer wg.Done()
d := ApplyReadOnly(ctx, rb, mutator)
mu.Lock()
diags = diags.Extend(d)
mu.Unlock()
}(mutator)
}
wg.Wait()
return diags
}
// Parallel runs the given mutators in parallel.
func Parallel(mutators ...ReadOnlyMutator) ReadOnlyMutator {
return &parallel{
mutators: mutators,
}
}

73
bundle/parallel_test.go Normal file
View File

@ -0,0 +1,73 @@
package bundle
import (
"context"
"testing"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/libs/diag"
"github.com/stretchr/testify/require"
)
type addToContainer struct {
container *[]int
value int
err bool
}
func (m *addToContainer) Apply(ctx context.Context, b ReadOnlyBundle) diag.Diagnostics {
if m.err {
return diag.Errorf("error")
}
c := *m.container
c = append(c, m.value)
*m.container = c
return nil
}
func (m *addToContainer) Name() string {
return "addToContainer"
}
func TestParallelMutatorWork(t *testing.T) {
b := &Bundle{
Config: config.Root{},
}
container := []int{}
m1 := &addToContainer{container: &container, value: 1}
m2 := &addToContainer{container: &container, value: 2}
m3 := &addToContainer{container: &container, value: 3}
m := Parallel(m1, m2, m3)
// Apply the mutator
diags := ApplyReadOnly(context.Background(), ReadOnly(b), m)
require.Empty(t, diags)
require.Len(t, container, 3)
require.Contains(t, container, 1)
require.Contains(t, container, 2)
require.Contains(t, container, 3)
}
func TestParallelMutatorWorkWithErrors(t *testing.T) {
b := &Bundle{
Config: config.Root{},
}
container := []int{}
m1 := &addToContainer{container: &container, value: 1}
m2 := &addToContainer{container: &container, err: true, value: 2}
m3 := &addToContainer{container: &container, value: 3}
m := Parallel(m1, m2, m3)
// Apply the mutator
diags := ApplyReadOnly(context.Background(), ReadOnly(b), m)
require.Len(t, diags, 1)
require.Equal(t, "error", diags[0].Summary)
require.Len(t, container, 2)
require.Contains(t, container, 1)
require.Contains(t, container, 3)
}

View File

@ -13,6 +13,7 @@ import (
const CAN_MANAGE = "CAN_MANAGE" const CAN_MANAGE = "CAN_MANAGE"
const CAN_VIEW = "CAN_VIEW" const CAN_VIEW = "CAN_VIEW"
const CAN_RUN = "CAN_RUN" const CAN_RUN = "CAN_RUN"
const IS_OWNER = "IS_OWNER"
var allowedLevels = []string{CAN_MANAGE, CAN_VIEW, CAN_RUN} var allowedLevels = []string{CAN_MANAGE, CAN_VIEW, CAN_RUN}
var levelsMap = map[string](map[string]string){ var levelsMap = map[string](map[string]string){

View File

@ -26,7 +26,7 @@ func Deploy() bundle.Mutator {
terraform.StatePull(), terraform.StatePull(),
deploy.StatePull(), deploy.StatePull(),
mutator.ValidateGitDetails(), mutator.ValidateGitDetails(),
libraries.MatchWithArtifacts(), libraries.ValidateLocalLibrariesExist(),
artifacts.CleanUp(), artifacts.CleanUp(),
artifacts.UploadAll(), artifacts.UploadAll(),
python.TransformWheelTask(), python.TransformWheelTask(),

View File

@ -28,6 +28,7 @@ func Initialize() bundle.Mutator {
mutator.ExpandWorkspaceRoot(), mutator.ExpandWorkspaceRoot(),
mutator.DefineDefaultWorkspacePaths(), mutator.DefineDefaultWorkspacePaths(),
mutator.SetVariables(), mutator.SetVariables(),
mutator.ResolveVariableReferencesInLookup(),
mutator.ResolveResourceReferences(), mutator.ResolveResourceReferences(),
mutator.ResolveVariableReferences( mutator.ResolveVariableReferences(
"bundle", "bundle",
@ -38,6 +39,7 @@ func Initialize() bundle.Mutator {
mutator.SetRunAs(), mutator.SetRunAs(),
mutator.OverrideCompute(), mutator.OverrideCompute(),
mutator.ProcessTargetMode(), mutator.ProcessTargetMode(),
mutator.DefaultQueueing(),
mutator.ExpandPipelineGlobPaths(), mutator.ExpandPipelineGlobPaths(),
mutator.TranslatePaths(), mutator.TranslatePaths(),
python.WrapperWarning(), python.WrapperWarning(),

View File

@ -104,7 +104,7 @@ func (t *pythonTrampoline) GetTasks(b *bundle.Bundle) []mutator.TaskWithJobKey {
// At this point we no longer have local paths in the Libraries sections. // At this point we no longer have local paths in the Libraries sections.
// Local paths were replaced with remote paths when the artifacts were uploaded // Local paths were replaced with remote paths when the artifacts were uploaded
// in the artifacts.UploadAll mutator. // in the artifacts.UploadAll mutator.
if task.PythonWheelTask == nil || !needsTrampoline(task) { if task.PythonWheelTask == nil || !needsTrampoline(*task) {
continue continue
} }
@ -117,7 +117,7 @@ func (t *pythonTrampoline) GetTasks(b *bundle.Bundle) []mutator.TaskWithJobKey {
return result return result
} }
func needsTrampoline(task *jobs.Task) bool { func needsTrampoline(task jobs.Task) bool {
return libraries.IsTaskWithWorkspaceLibraries(task) return libraries.IsTaskWithWorkspaceLibraries(task)
} }

View File

@ -25,7 +25,7 @@ func (m *wrapperWarning) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn
} }
if hasIncompatibleWheelTasks(ctx, b) { if hasIncompatibleWheelTasks(ctx, b) {
return diag.Errorf("python wheel tasks with local libraries require compute with DBR 13.1+. Please change your cluster configuration or set experimental 'python_wheel_wrapper' setting to 'true'") return diag.Errorf("Python wheel tasks require compute with DBR 13.3+ to include local libraries. Please change your cluster configuration or use the experimental 'python_wheel_wrapper' setting. See https://docs.databricks.com/dev-tools/bundles/python-wheel.html for more information.")
} }
return nil return nil
} }

View File

@ -102,7 +102,7 @@ func TestIncompatibleWheelTasksWithJobClusterKey(t *testing.T) {
require.True(t, hasIncompatibleWheelTasks(context.Background(), b)) require.True(t, hasIncompatibleWheelTasks(context.Background(), b))
diags := bundle.Apply(context.Background(), b, WrapperWarning()) diags := bundle.Apply(context.Background(), b, WrapperWarning())
require.ErrorContains(t, diags.Error(), "python wheel tasks with local libraries require compute with DBR 13.1+.") require.ErrorContains(t, diags.Error(), "require compute with DBR 13.3")
} }
func TestIncompatibleWheelTasksWithExistingClusterId(t *testing.T) { func TestIncompatibleWheelTasksWithExistingClusterId(t *testing.T) {

127
bundle/run/args.go Normal file
View File

@ -0,0 +1,127 @@
package run
import (
"fmt"
"strings"
"github.com/spf13/cobra"
)
// argsHandler defines the (unexported) interface for the runners in this
// package to implement to handle context-specific positional arguments.
//
// For jobs, this means:
// - If a job uses job parameters: parse positional arguments into key-value pairs
// and pass them as job parameters.
// - If a job does not use job parameters AND only has Spark Python tasks:
// pass through the positional arguments as a list of Python parameters.
// - If a job does not use job parameters AND only has notebook tasks:
// parse arguments into key-value pairs and pass them as notebook parameters.
// - ...
//
// In all cases, we may be able to provide context-aware argument completions.
type argsHandler interface {
// Parse additional positional arguments.
ParseArgs(args []string, opts *Options) error
// Complete additional positional arguments.
CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective)
}
// nopArgsHandler is a no-op implementation of [argsHandler].
// It returns an error if any positional arguments are present and doesn't complete anything.
type nopArgsHandler struct{}
func (nopArgsHandler) ParseArgs(args []string, opts *Options) error {
if len(args) == 0 {
return nil
}
return fmt.Errorf("received %d unexpected positional arguments", len(args))
}
func (nopArgsHandler) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return nil, cobra.ShellCompDirectiveNoFileComp
}
// argsToKeyValueMap parses key-value pairs from the specified arguments.
//
// It accepts these formats:
// - `--key=value`
// - `--key`, `value`
//
// Remaining arguments are returned as-is.
func argsToKeyValueMap(args []string) (map[string]string, []string) {
kv := make(map[string]string)
key := ""
tail := args
for i, arg := range args {
// If key is set, use the next argument as value. if key != "" {
if key != "" {
kv[key] = arg
key = ""
tail = args[i+1:]
continue
}
if strings.HasPrefix(arg, "--") {
parts := strings.SplitN(arg[2:], "=", 2)
if len(parts) == 2 {
kv[parts[0]] = parts[1]
tail = args[i+1:]
continue
}
// Use this argument as key, the next as value.
key = parts[0]
continue
}
// If we cannot interpret it, return here.
break
}
return kv, tail
}
// genericParseKeyValueArgs parses key-value pairs from the specified arguments.
// If there are any positional arguments left, it returns an error.
func genericParseKeyValueArgs(args []string) (map[string]string, error) {
kv, args := argsToKeyValueMap(args)
if len(args) > 0 {
return nil, fmt.Errorf("received %d unexpected positional arguments", len(args))
}
return kv, nil
}
// genericCompleteKeyValueArgs completes key-value pairs from the specified arguments.
// Completion options that are already specified are skipped.
func genericCompleteKeyValueArgs(args []string, toComplete string, options []string) ([]string, cobra.ShellCompDirective) {
// If the string to complete contains an equals sign, then we are
// completing the value part (which we don't know here).
if strings.Contains(toComplete, "=") {
return nil, cobra.ShellCompDirectiveNoFileComp
}
// Remove already completed key/value pairs.
kv, args := argsToKeyValueMap(args)
// If the list of remaining args is empty, return possible completions.
if len(args) == 0 {
var completions []string
for _, option := range options {
// Skip options that have already been specified.
if _, ok := kv[option]; ok {
continue
}
completions = append(completions, fmt.Sprintf("--%s=", option))
}
// Note: we include cobra.ShellCompDirectiveNoSpace to suggest including
// the value part right after the equals sign.
return completions, cobra.ShellCompDirectiveNoFileComp | cobra.ShellCompDirectiveNoSpace
}
return nil, cobra.ShellCompDirectiveNoFileComp
}
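
The key-value contract documented above is easiest to see with a small sketch. This is illustrative only and not part of the commit; it assumes it sits alongside args.go in package run, since argsToKeyValueMap is unexported.

package run

import "fmt"

// Illustrative sketch: both "--key=value" and "--key value" forms are
// collected into the map; the first argument that fits neither form starts
// the returned tail.
func exampleArgsToKeyValueMap() {
	kv, tail := argsToKeyValueMap([]string{"--foo=bar", "--baz", "qux", "rest"})
	fmt.Println(kv)   // map[baz:qux foo:bar]
	fmt.Println(tail) // [rest]
}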

134
bundle/run/args_test.go Normal file
View File

@ -0,0 +1,134 @@
package run
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestNopArgsHandler(t *testing.T) {
h := nopArgsHandler{}
opts := &Options{}
// No error if no positional arguments are passed.
err := h.ParseArgs([]string{}, opts)
assert.NoError(t, err)
// Error if any positional arguments are passed.
err = h.ParseArgs([]string{"foo"}, opts)
assert.EqualError(t, err, "received 1 unexpected positional arguments")
// No completions.
completions, _ := h.CompleteArgs([]string{}, "")
assert.Nil(t, completions)
}
func TestArgsToKeyValueMap(t *testing.T) {
for _, tc := range []struct {
input []string
expected map[string]string
tail []string
err error
}{
{
input: []string{},
expected: map[string]string{},
tail: []string{},
},
{
input: []string{"--foo=bar", "--baz", "qux"},
expected: map[string]string{
"foo": "bar",
"baz": "qux",
},
tail: []string{},
},
{
input: []string{"--foo=bar", "--baz", "qux", "tail"},
expected: map[string]string{
"foo": "bar",
"baz": "qux",
},
tail: []string{"tail"},
},
{
input: []string{"--foo=bar", "--baz", "qux", "tail", "--foo=bar"},
expected: map[string]string{
"foo": "bar",
"baz": "qux",
},
tail: []string{"tail", "--foo=bar"},
},
{
input: []string{"--foo=bar", "--baz=qux"},
expected: map[string]string{
"foo": "bar",
"baz": "qux",
},
tail: []string{},
},
{
input: []string{"--foo=bar", "--baz=--qux"},
expected: map[string]string{
"foo": "bar",
"baz": "--qux",
},
tail: []string{},
},
{
input: []string{"--foo=bar", "--baz="},
expected: map[string]string{
"foo": "bar",
"baz": "",
},
tail: []string{},
},
{
input: []string{"--foo=bar", "--baz"},
expected: map[string]string{
"foo": "bar",
},
tail: []string{"--baz"},
},
} {
actual, tail := argsToKeyValueMap(tc.input)
assert.Equal(t, tc.expected, actual)
assert.Equal(t, tc.tail, tail)
}
}
func TestGenericParseKeyValueArgs(t *testing.T) {
kv, err := genericParseKeyValueArgs([]string{"--foo=bar", "--baz", "qux"})
assert.NoError(t, err)
assert.Equal(t, map[string]string{
"foo": "bar",
"baz": "qux",
}, kv)
_, err = genericParseKeyValueArgs([]string{"--foo=bar", "--baz", "qux", "tail"})
assert.EqualError(t, err, "received 1 unexpected positional arguments")
}
func TestGenericCompleteKeyValueArgs(t *testing.T) {
var completions []string
// Complete nothing if there are no options.
completions, _ = genericCompleteKeyValueArgs([]string{}, ``, []string{})
assert.Empty(t, completions)
// Complete nothing if we're in the middle of a key-value pair (as single argument with equals sign).
completions, _ = genericCompleteKeyValueArgs([]string{}, `--foo=`, []string{`foo`, `bar`})
assert.Empty(t, completions)
// Complete nothing if we're in the middle of a key-value pair (as two arguments).
completions, _ = genericCompleteKeyValueArgs([]string{`--foo`}, ``, []string{`foo`, `bar`})
assert.Empty(t, completions)
// Complete if we're at the beginning.
completions, _ = genericCompleteKeyValueArgs([]string{}, ``, []string{`foo`, `bar`})
assert.Equal(t, []string{`--foo=`, `--bar=`}, completions)
// Complete if we have already one key-value pair.
completions, _ = genericCompleteKeyValueArgs([]string{`--foo=bar`}, ``, []string{`foo`, `bar`})
assert.Equal(t, []string{`--bar=`}, completions)
}

View File

@ -15,6 +15,7 @@ import (
"github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/fatih/color" "github.com/fatih/color"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
@ -315,3 +316,11 @@ func (r *jobRunner) Cancel(ctx context.Context) error {
return errGroup.Wait() return errGroup.Wait()
} }
func (r *jobRunner) ParseArgs(args []string, opts *Options) error {
return r.posArgsHandler().ParseArgs(args, opts)
}
func (r *jobRunner) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return r.posArgsHandler().CompleteArgs(args, toComplete)
}

184
bundle/run/job_args.go Normal file
View File

@ -0,0 +1,184 @@
package run
import (
"github.com/databricks/cli/bundle/config/resources"
"github.com/spf13/cobra"
"golang.org/x/exp/maps"
)
type jobParameterArgs struct {
*resources.Job
}
func (a jobParameterArgs) ParseArgs(args []string, opts *Options) error {
kv, err := genericParseKeyValueArgs(args)
if err != nil {
return err
}
// Merge the key-value pairs from the args into the options struct.
if opts.Job.jobParams == nil {
opts.Job.jobParams = kv
} else {
for k, v := range kv {
opts.Job.jobParams[k] = v
}
}
return nil
}
func (a jobParameterArgs) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
var completions []string
for _, param := range a.Parameters {
completions = append(completions, param.Name)
}
return genericCompleteKeyValueArgs(args, toComplete, completions)
}
type jobTaskNotebookParamArgs struct {
*resources.Job
}
func (a jobTaskNotebookParamArgs) ParseArgs(args []string, opts *Options) error {
kv, err := genericParseKeyValueArgs(args)
if err != nil {
return err
}
// Merge the key-value pairs from the args into the options struct.
if opts.Job.notebookParams == nil {
opts.Job.notebookParams = kv
} else {
for k, v := range kv {
opts.Job.notebookParams[k] = v
}
}
return nil
}
func (a jobTaskNotebookParamArgs) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
parameters := make(map[string]string)
for _, t := range a.Tasks {
if nt := t.NotebookTask; nt != nil {
maps.Copy(parameters, nt.BaseParameters)
}
}
return genericCompleteKeyValueArgs(args, toComplete, maps.Keys(parameters))
}
type jobTaskJarParamArgs struct {
*resources.Job
}
func (a jobTaskJarParamArgs) ParseArgs(args []string, opts *Options) error {
opts.Job.jarParams = append(opts.Job.jarParams, args...)
return nil
}
func (a jobTaskJarParamArgs) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return nil, cobra.ShellCompDirectiveNoFileComp
}
type jobTaskPythonParamArgs struct {
*resources.Job
}
func (a jobTaskPythonParamArgs) ParseArgs(args []string, opts *Options) error {
opts.Job.pythonParams = append(opts.Job.pythonParams, args...)
return nil
}
func (a jobTaskPythonParamArgs) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return nil, cobra.ShellCompDirectiveNoFileComp
}
type jobTaskSparkSubmitParamArgs struct {
*resources.Job
}
func (a jobTaskSparkSubmitParamArgs) ParseArgs(args []string, opts *Options) error {
opts.Job.sparkSubmitParams = append(opts.Job.sparkSubmitParams, args...)
return nil
}
func (a jobTaskSparkSubmitParamArgs) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return nil, cobra.ShellCompDirectiveNoFileComp
}
type jobTaskType int
const (
jobTaskTypeNotebook jobTaskType = iota + 1
jobTaskTypeSparkJar
jobTaskTypeSparkPython
jobTaskTypeSparkSubmit
jobTaskTypePipeline
jobTaskTypePythonWheel
jobTaskTypeSql
jobTaskTypeDbt
jobTaskTypeRunJob
)
func (r *jobRunner) posArgsHandler() argsHandler {
job := r.job
if job == nil || job.JobSettings == nil {
return nopArgsHandler{}
}
// Handle job parameters, if any are defined.
if len(job.Parameters) > 0 {
return &jobParameterArgs{job}
}
// Handle task parameters otherwise.
var seen = make(map[jobTaskType]bool)
for _, t := range job.Tasks {
if t.NotebookTask != nil {
seen[jobTaskTypeNotebook] = true
}
if t.SparkJarTask != nil {
seen[jobTaskTypeSparkJar] = true
}
if t.SparkPythonTask != nil {
seen[jobTaskTypeSparkPython] = true
}
if t.SparkSubmitTask != nil {
seen[jobTaskTypeSparkSubmit] = true
}
if t.PipelineTask != nil {
seen[jobTaskTypePipeline] = true
}
if t.PythonWheelTask != nil {
seen[jobTaskTypePythonWheel] = true
}
if t.SqlTask != nil {
seen[jobTaskTypeSql] = true
}
if t.DbtTask != nil {
seen[jobTaskTypeDbt] = true
}
if t.RunJobTask != nil {
seen[jobTaskTypeRunJob] = true
}
}
// Cannot handle positional arguments if we have more than one task type.
keys := maps.Keys(seen)
if len(keys) != 1 {
return nopArgsHandler{}
}
switch keys[0] {
case jobTaskTypeNotebook:
return jobTaskNotebookParamArgs{job}
case jobTaskTypeSparkJar:
return jobTaskJarParamArgs{job}
case jobTaskTypeSparkPython, jobTaskTypePythonWheel:
return jobTaskPythonParamArgs{job}
case jobTaskTypeSparkSubmit:
return jobTaskSparkSubmitParamArgs{job}
default:
// No positional argument handling for other task types.
return nopArgsHandler{}
}
}
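
For a job that defines job parameters, the handler selected by posArgsHandler merges the arguments that follow "--" on the command line into the run's job parameters. A minimal in-package sketch follows; it is illustrative only and not part of the commit, and the job shape and parameter names ("env", "dry_run") are made up.

package run

import (
	"fmt"

	"github.com/databricks/cli/bundle/config/resources"
	"github.com/databricks/databricks-sdk-go/service/jobs"
)

// Illustrative sketch: "databricks bundle run my_job -- --env=prod --dry_run true"
// hands the arguments after "--" to the handler, which merges them into the
// job parameters for the run.
func exampleJobParameterArgs() {
	job := &resources.Job{
		JobSettings: &jobs.JobSettings{
			Parameters: []jobs.JobParameterDefinition{
				{Name: "env", Default: "dev"},
				{Name: "dry_run", Default: "false"},
			},
		},
	}

	handler := jobParameterArgs{job}
	var opts Options
	if err := handler.ParseArgs([]string{"--env=prod", "--dry_run", "true"}, &opts); err != nil {
		panic(err)
	}
	fmt.Println(opts.Job.jobParams) // map[dry_run:true env:prod]
}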

223
bundle/run/job_args_test.go Normal file
View File

@ -0,0 +1,223 @@
package run
import (
"testing"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/stretchr/testify/assert"
)
func TestJobParameterArgs(t *testing.T) {
a := jobParameterArgs{
&resources.Job{
JobSettings: &jobs.JobSettings{
Parameters: []jobs.JobParameterDefinition{
{
Name: "foo",
Default: "value",
},
{
Name: "bar",
Default: "value",
},
},
},
},
}
t.Run("ParseArgsError", func(t *testing.T) {
var opts Options
err := a.ParseArgs([]string{"--p1=v1", "superfluous"}, &opts)
assert.ErrorContains(t, err, "unexpected positional arguments")
})
t.Run("ParseArgs", func(t *testing.T) {
var opts Options
err := a.ParseArgs([]string{"--p1=v1", "--p2=v2"}, &opts)
assert.NoError(t, err)
assert.Equal(
t,
map[string]string{
"p1": "v1",
"p2": "v2",
},
opts.Job.jobParams,
)
})
t.Run("ParseArgsAppend", func(t *testing.T) {
var opts Options
opts.Job.jobParams = map[string]string{"p1": "v1"}
err := a.ParseArgs([]string{"--p2=v2"}, &opts)
assert.NoError(t, err)
assert.Equal(
t,
map[string]string{
"p1": "v1",
"p2": "v2",
},
opts.Job.jobParams,
)
})
t.Run("CompleteArgs", func(t *testing.T) {
completions, _ := a.CompleteArgs([]string{}, "")
assert.Equal(t, []string{"--foo=", "--bar="}, completions)
})
}
func TestJobTaskNotebookParamArgs(t *testing.T) {
a := jobTaskNotebookParamArgs{
&resources.Job{
JobSettings: &jobs.JobSettings{
Tasks: []jobs.Task{
{
NotebookTask: &jobs.NotebookTask{
BaseParameters: map[string]string{
"foo": "value",
"bar": "value",
},
},
},
},
},
},
}
t.Run("ParseArgsError", func(t *testing.T) {
var opts Options
err := a.ParseArgs([]string{"--p1=v1", "superfluous"}, &opts)
assert.ErrorContains(t, err, "unexpected positional arguments")
})
t.Run("ParseArgs", func(t *testing.T) {
var opts Options
err := a.ParseArgs([]string{"--p1=v1", "--p2=v2"}, &opts)
assert.NoError(t, err)
assert.Equal(
t,
map[string]string{
"p1": "v1",
"p2": "v2",
},
opts.Job.notebookParams,
)
})
t.Run("ParseArgsAppend", func(t *testing.T) {
var opts Options
opts.Job.notebookParams = map[string]string{"p1": "v1"}
err := a.ParseArgs([]string{"--p2=v2"}, &opts)
assert.NoError(t, err)
assert.Equal(
t,
map[string]string{
"p1": "v1",
"p2": "v2",
},
opts.Job.notebookParams,
)
})
t.Run("CompleteArgs", func(t *testing.T) {
completions, _ := a.CompleteArgs([]string{}, "")
assert.ElementsMatch(t, []string{"--foo=", "--bar="}, completions)
})
}
func TestJobTaskJarParamArgs(t *testing.T) {
a := jobTaskJarParamArgs{}
t.Run("ParseArgs", func(t *testing.T) {
var opts Options
err := a.ParseArgs([]string{"foo", "bar"}, &opts)
assert.NoError(t, err)
assert.Equal(
t,
[]string{"foo", "bar"},
opts.Job.jarParams,
)
})
t.Run("ParseArgsAppend", func(t *testing.T) {
var opts Options
opts.Job.jarParams = []string{"foo"}
err := a.ParseArgs([]string{"bar"}, &opts)
assert.NoError(t, err)
assert.Equal(
t,
[]string{"foo", "bar"},
opts.Job.jarParams,
)
})
t.Run("CompleteArgs", func(t *testing.T) {
completions, _ := a.CompleteArgs([]string{}, "")
assert.Empty(t, completions)
})
}
func TestJobTaskPythonParamArgs(t *testing.T) {
a := jobTaskPythonParamArgs{}
t.Run("ParseArgs", func(t *testing.T) {
var opts Options
err := a.ParseArgs([]string{"foo", "bar"}, &opts)
assert.NoError(t, err)
assert.Equal(
t,
[]string{"foo", "bar"},
opts.Job.pythonParams,
)
})
t.Run("ParseArgsAppend", func(t *testing.T) {
var opts Options
opts.Job.pythonParams = []string{"foo"}
err := a.ParseArgs([]string{"bar"}, &opts)
assert.NoError(t, err)
assert.Equal(
t,
[]string{"foo", "bar"},
opts.Job.pythonParams,
)
})
t.Run("CompleteArgs", func(t *testing.T) {
completions, _ := a.CompleteArgs([]string{}, "")
assert.Empty(t, completions)
})
}
func TestJobTaskSparkSubmitParamArgs(t *testing.T) {
a := jobTaskSparkSubmitParamArgs{}
t.Run("ParseArgs", func(t *testing.T) {
var opts Options
err := a.ParseArgs([]string{"foo", "bar"}, &opts)
assert.NoError(t, err)
assert.Equal(
t,
[]string{"foo", "bar"},
opts.Job.sparkSubmitParams,
)
})
t.Run("ParseArgsAppend", func(t *testing.T) {
var opts Options
opts.Job.sparkSubmitParams = []string{"foo"}
err := a.ParseArgs([]string{"bar"}, &opts)
assert.NoError(t, err)
assert.Equal(
t,
[]string{"foo", "bar"},
opts.Job.sparkSubmitParams,
)
})
t.Run("CompleteArgs", func(t *testing.T) {
completions, _ := a.CompleteArgs([]string{}, "")
assert.Empty(t, completions)
})
}

View File

@ -12,6 +12,7 @@ import (
"github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go/service/pipelines" "github.com/databricks/databricks-sdk-go/service/pipelines"
"github.com/spf13/cobra"
) )
func filterEventsByUpdateId(events []pipelines.PipelineEvent, updateId string) []pipelines.PipelineEvent { func filterEventsByUpdateId(events []pipelines.PipelineEvent, updateId string) []pipelines.PipelineEvent {
@ -181,3 +182,15 @@ func (r *pipelineRunner) Cancel(ctx context.Context) error {
_, err = wait.GetWithTimeout(jobRunTimeout) _, err = wait.GetWithTimeout(jobRunTimeout)
return err return err
} }
func (r *pipelineRunner) ParseArgs(args []string, opts *Options) error {
if len(args) == 0 {
return nil
}
return fmt.Errorf("received %d unexpected positional arguments", len(args))
}
func (r *pipelineRunner) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return nil, cobra.ShellCompDirectiveNoFileComp
}

View File

@ -29,6 +29,9 @@ type Runner interface {
// Cancel the underlying workflow. // Cancel the underlying workflow.
Cancel(ctx context.Context) error Cancel(ctx context.Context) error
// Runners support parsing and completion of additional positional arguments.
argsHandler
} }
// Find locates a runner matching the specified argument. // Find locates a runner matching the specified argument.

View File

@ -1,35 +1,42 @@
# This is a Databricks asset bundle definition for my_project.
# See https://docs.databricks.com/dev-tools/bundles/index.html for documentation.
bundle: bundle:
name: bundle_permissions name: default_python
permissions: include:
- level: CAN_RUN - resources/*.yml
user_name: test@company.com
targets: targets:
development: # The 'dev' target, for development purposes. This target is the default.
dev:
# We use 'mode: development' to indicate this is a personal development copy:
# - Deployed resources get prefixed with '[dev my_user_name]'
# - Any job schedules and triggers are paused by default
# - The 'development' mode is used for Delta Live Tables pipelines
mode: development
default: true
workspace:
host: https://myworkspace.databricks.com
## Optionally, there could be a 'staging' target here.
## (See Databricks docs on CI/CD at https://docs.databricks.com/dev-tools/bundles/ci-cd.html.)
#
# staging:
# workspace:
# host: https://myworkspace.databricks.com
# The 'prod' target, used for production deployment.
prod:
# We use 'mode: production' to indicate this is a production deployment.
# Doing so enables strict verification of the settings below.
mode: production
workspace:
host: https://e2-dogfood.staging.cloud.databricks.com
# We always use /Users/user@company.com for all resources to make sure we only have a single copy.
# If this path results in an error, please make sure you have a recent version of the CLI installed.
root_path: /Users/user@company.com/.bundle/${bundle.name}/${bundle.target}
run_as:
user_name: user@company.com
permissions: permissions:
- level: CAN_MANAGE - level: CAN_MANAGE
group_name: devs group_name: data_scientists@company.com
- level: CAN_VIEW
service_principal_name: 1234-abcd
- level: CAN_RUN
user_name: bot@company.com
resources:
pipelines:
nyc_taxi_pipeline:
target: nyc_taxi_production
development: false
photon: true
jobs:
pipeline_schedule:
name: Daily refresh of production pipeline
schedule:
quartz_cron_expression: 6 6 11 * * ?
timezone_id: UTC
tasks:
- pipeline_task:
pipeline_id: "to be interpolated"

View File

@ -0,0 +1,12 @@
package config_tests
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestEnvironmentKeySupported(t *testing.T) {
_, diags := loadTargetWithDiags("./python_wheel/environment_key", "default")
require.Empty(t, diags)
}

View File

@ -0,0 +1,27 @@
bundle:
name: job_cluster_key
workspace:
host: https://acme.cloud.databricks.com/
targets:
default:
resources:
jobs:
foo:
name: job
tasks:
- task_key: test
job_cluster_key: key
development:
resources:
jobs:
foo:
job_clusters:
- job_cluster_key: key
new_cluster:
node_type_id: i3.xlarge
num_workers: 1
tasks:
- task_key: test
job_cluster_key: key

View File

@ -0,0 +1,28 @@
package config_tests
import (
"context"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config/validate"
"github.com/databricks/cli/libs/diag"
"github.com/stretchr/testify/require"
)
func TestJobClusterKeyNotDefinedTest(t *testing.T) {
b := loadTarget(t, "./job_cluster_key", "default")
diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), validate.JobClusterKeyDefined())
require.Len(t, diags, 1)
require.NoError(t, diags.Error())
require.Equal(t, diags[0].Severity, diag.Warning)
require.Equal(t, diags[0].Summary, "job_cluster_key key is not defined")
}
func TestJobClusterKeyDefinedTest(t *testing.T) {
b := loadTarget(t, "./job_cluster_key", "development")
diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), validate.JobClusterKeyDefined())
require.Len(t, diags, 0)
}

View File

@ -0,0 +1,3 @@
build/
*.egg-info
.databricks

View File

@ -0,0 +1,26 @@
bundle:
name: environment_key
artifacts:
my_test_code:
type: whl
path: "./my_test_code"
build: "python3 setup.py bdist_wheel"
resources:
jobs:
test_job:
name: "My Wheel Job"
tasks:
- task_key: TestTask
existing_cluster_id: "0717-132531-5opeqon1"
python_wheel_task:
package_name: "my_test_code"
entry_point: "run"
environment_key: "test_env"
environments:
- environment_key: "test_env"
spec:
client: "1"
dependencies:
- ./my_test_code/dist/*.whl

View File

@ -0,0 +1,15 @@
from setuptools import setup, find_packages
import src
setup(
name="my_test_code",
version=src.__version__,
author=src.__author__,
url="https://databricks.com",
author_email="john.doe@databricks.com",
description="my test wheel",
packages=find_packages(include=["src"]),
entry_points={"group_1": "run=src.__main__:main"},
install_requires=["setuptools"],
)

View File

@ -0,0 +1,2 @@
__version__ = "0.0.1"
__author__ = "Databricks"

View File

@ -0,0 +1,16 @@
"""
The entry point of the Python Wheel
"""
import sys
def main():
# This method will print the provided arguments
print('Hello from my func')
print('Got arguments:')
print(sys.argv)
if __name__ == '__main__':
main()

View File

@ -23,7 +23,7 @@ func TestPythonWheelBuild(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, 1, len(matches)) require.Equal(t, 1, len(matches))
match := libraries.MatchWithArtifacts() match := libraries.ValidateLocalLibrariesExist()
diags = bundle.Apply(ctx, b, match) diags = bundle.Apply(ctx, b, match)
require.NoError(t, diags.Error()) require.NoError(t, diags.Error())
} }
@ -40,7 +40,7 @@ func TestPythonWheelBuildAutoDetect(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, 1, len(matches)) require.Equal(t, 1, len(matches))
match := libraries.MatchWithArtifacts() match := libraries.ValidateLocalLibrariesExist()
diags = bundle.Apply(ctx, b, match) diags = bundle.Apply(ctx, b, match)
require.NoError(t, diags.Error()) require.NoError(t, diags.Error())
} }
@ -53,7 +53,7 @@ func TestPythonWheelWithDBFSLib(t *testing.T) {
diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build())) diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build()))
require.NoError(t, diags.Error()) require.NoError(t, diags.Error())
match := libraries.MatchWithArtifacts() match := libraries.ValidateLocalLibrariesExist()
diags = bundle.Apply(ctx, b, match) diags = bundle.Apply(ctx, b, match)
require.NoError(t, diags.Error()) require.NoError(t, diags.Error())
} }
@ -66,7 +66,7 @@ func TestPythonWheelBuildNoBuildJustUpload(t *testing.T) {
diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build())) diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build()))
require.NoError(t, diags.Error()) require.NoError(t, diags.Error())
match := libraries.MatchWithArtifacts() match := libraries.ValidateLocalLibrariesExist()
diags = bundle.Apply(ctx, b, match) diags = bundle.Apply(ctx, b, match)
require.ErrorContains(t, diags.Error(), "./non-existing/*.whl") require.ErrorContains(t, diags.Error(), "./non-existing/*.whl")
@ -79,3 +79,20 @@ func TestPythonWheelBuildNoBuildJustUpload(t *testing.T) {
"my_test_code-0.0.1-py3-none-any.whl", "my_test_code-0.0.1-py3-none-any.whl",
)) ))
} }
func TestPythonWheelBuildWithEnvironmentKey(t *testing.T) {
ctx := context.Background()
b, err := bundle.Load(ctx, "./python_wheel/environment_key")
require.NoError(t, err)
diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build()))
require.NoError(t, diags.Error())
matches, err := filepath.Glob("./python_wheel/environment_key/my_test_code/dist/my_test_code-*.whl")
require.NoError(t, err)
require.Equal(t, 1, len(matches))
match := libraries.ValidateLocalLibrariesExist()
diags = bundle.Apply(ctx, b, match)
require.NoError(t, diags.Error())
}

View File

@ -0,0 +1,68 @@
bundle:
name: "run_as"
run_as:
service_principal_name: "my_service_principal"
experimental:
use_legacy_run_as: true
resources:
jobs:
job_one:
name: Job One
tasks:
- task_key: "task_one"
notebook_task:
notebook_path: "./test.py"
job_two:
name: Job Two
tasks:
- task_key: "task_two"
notebook_task:
notebook_path: "./test.py"
job_three:
name: Job Three
run_as:
service_principal_name: "my_service_principal_for_job"
tasks:
- task_key: "task_three"
notebook_task:
notebook_path: "./test.py"
pipelines:
nyc_taxi_pipeline:
name: "nyc taxi loader"
permissions:
- level: CAN_VIEW
service_principal_name: my_service_principal
- level: CAN_VIEW
user_name: my_user_name
libraries:
- notebook:
path: ./dlt/nyc_taxi_loader
models:
model_one:
name: "skynet"
registered_models:
model_two:
name: "skynet (in UC)"
experiments:
experiment_one:
name: "experiment_one"
model_serving_endpoints:
model_serving_one:
name: "skynet"

View File

@ -13,6 +13,7 @@ import (
"github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/catalog"
"github.com/databricks/databricks-sdk-go/service/iam" "github.com/databricks/databricks-sdk-go/service/iam"
"github.com/databricks/databricks-sdk-go/service/ml" "github.com/databricks/databricks-sdk-go/service/ml"
"github.com/databricks/databricks-sdk-go/service/serving"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@ -113,7 +114,7 @@ func TestRunAsErrorForPipelines(t *testing.T) {
err := diags.Error() err := diags.Error()
configPath := filepath.FromSlash("run_as/not_allowed/pipelines/databricks.yml") configPath := filepath.FromSlash("run_as/not_allowed/pipelines/databricks.yml")
assert.EqualError(t, err, fmt.Sprintf("pipelines are not supported when the current deployment user is different from the bundle's run_as identity. Please deploy as the run_as identity. Location of the unsupported resource: %s:14:5. Current identity: jane@doe.com. Run as identity: my_service_principal", configPath)) assert.ErrorContains(t, err, "pipelines do not support", configPath)
} }
func TestRunAsNoErrorForPipelines(t *testing.T) { func TestRunAsNoErrorForPipelines(t *testing.T) {
@ -151,8 +152,7 @@ func TestRunAsErrorForModelServing(t *testing.T) {
diags := bundle.Apply(ctx, b, mutator.SetRunAs()) diags := bundle.Apply(ctx, b, mutator.SetRunAs())
err := diags.Error() err := diags.Error()
configPath := filepath.FromSlash("run_as/not_allowed/model_serving/databricks.yml") assert.ErrorContains(t, err, "model_serving_endpoints do not support")
assert.EqualError(t, err, fmt.Sprintf("model_serving_endpoints are not supported when the current deployment user is different from the bundle's run_as identity. Please deploy as the run_as identity. Location of the unsupported resource: %s:14:5. Current identity: jane@doe.com. Run as identity: my_service_principal", configPath))
} }
func TestRunAsNoErrorForModelServingEndpoints(t *testing.T) { func TestRunAsNoErrorForModelServingEndpoints(t *testing.T) {
@ -190,8 +190,7 @@ func TestRunAsErrorWhenBothUserAndSpSpecified(t *testing.T) {
diags := bundle.Apply(ctx, b, mutator.SetRunAs()) diags := bundle.Apply(ctx, b, mutator.SetRunAs())
err := diags.Error() err := diags.Error()
configPath := filepath.FromSlash("run_as/not_allowed/both_sp_and_user/databricks.yml") assert.ErrorContains(t, err, "run_as section cannot specify both user_name and service_principal_name")
assert.EqualError(t, err, fmt.Sprintf("run_as section must specify exactly one identity. A service_principal_name \"my_service_principal\" is specified at %s:6:27. A user_name \"my_user_name\" is defined at %s:7:14", configPath, configPath))
} }
func TestRunAsErrorNeitherUserOrSpSpecified(t *testing.T) { func TestRunAsErrorNeitherUserOrSpSpecified(t *testing.T) {
@ -233,3 +232,53 @@ func TestRunAsErrorNeitherUserOrSpSpecifiedAtTargetOverride(t *testing.T) {
configPath := filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user_override/override.yml") configPath := filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user_override/override.yml")
assert.EqualError(t, err, fmt.Sprintf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s:4:12", configPath)) assert.EqualError(t, err, fmt.Sprintf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s:4:12", configPath))
} }
func TestLegacyRunAs(t *testing.T) {
b := load(t, "./run_as/legacy")
ctx := context.Background()
bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
b.Config.Workspace.CurrentUser = &config.User{
User: &iam.User{
UserName: "jane@doe.com",
},
}
return nil
})
diags := bundle.Apply(ctx, b, mutator.SetRunAs())
assert.NoError(t, diags.Error())
assert.Len(t, b.Config.Resources.Jobs, 3)
jobs := b.Config.Resources.Jobs
// job_one and job_two should have the same run_as identity as the bundle.
assert.NotNil(t, jobs["job_one"].RunAs)
assert.Equal(t, "my_service_principal", jobs["job_one"].RunAs.ServicePrincipalName)
assert.Equal(t, "", jobs["job_one"].RunAs.UserName)
assert.NotNil(t, jobs["job_two"].RunAs)
assert.Equal(t, "my_service_principal", jobs["job_two"].RunAs.ServicePrincipalName)
assert.Equal(t, "", jobs["job_two"].RunAs.UserName)
// job_three should retain its run_as identity.
assert.NotNil(t, jobs["job_three"].RunAs)
assert.Equal(t, "my_service_principal_for_job", jobs["job_three"].RunAs.ServicePrincipalName)
assert.Equal(t, "", jobs["job_three"].RunAs.UserName)
// Assert owner permissions for pipelines are set.
pipelines := b.Config.Resources.Pipelines
assert.Len(t, pipelines["nyc_taxi_pipeline"].Permissions, 2)
assert.Equal(t, "CAN_VIEW", pipelines["nyc_taxi_pipeline"].Permissions[0].Level)
assert.Equal(t, "my_user_name", pipelines["nyc_taxi_pipeline"].Permissions[0].UserName)
assert.Equal(t, "IS_OWNER", pipelines["nyc_taxi_pipeline"].Permissions[1].Level)
assert.Equal(t, "my_service_principal", pipelines["nyc_taxi_pipeline"].Permissions[1].ServicePrincipalName)
// Assert other resources are not affected.
assert.Equal(t, ml.Model{Name: "skynet"}, *b.Config.Resources.Models["model_one"].Model)
assert.Equal(t, catalog.CreateRegisteredModelRequest{Name: "skynet (in UC)"}, *b.Config.Resources.RegisteredModels["model_two"].CreateRegisteredModelRequest)
assert.Equal(t, ml.Experiment{Name: "experiment_one"}, *b.Config.Resources.Experiments["experiment_one"].Experiment)
assert.Equal(t, serving.CreateServingEndpoint{Name: "skynet"}, *b.Config.Resources.ModelServingEndpoints["model_serving_one"].CreateServingEndpoint)
}

View File

@ -0,0 +1,39 @@
package config_tests
import (
"context"
"fmt"
"path/filepath"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config/validate"
"github.com/databricks/cli/libs/diag"
"github.com/stretchr/testify/require"
)
func TestSyncIncludeExcludeNoMatchesTest(t *testing.T) {
b := loadTarget(t, "./override_sync", "development")
diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), validate.ValidateSyncPatterns())
require.Len(t, diags, 3)
require.NoError(t, diags.Error())
require.Equal(t, diags[0].Severity, diag.Warning)
require.Equal(t, diags[0].Summary, "Pattern dist does not match any files")
require.Equal(t, diags[0].Location.File, filepath.Join("override_sync", "databricks.yml"))
require.Equal(t, diags[0].Location.Line, 17)
require.Equal(t, diags[0].Location.Column, 11)
require.Equal(t, diags[0].Path.String(), "sync.exclude[0]")
summaries := []string{
fmt.Sprintf("Pattern %s does not match any files", filepath.Join("src", "*")),
fmt.Sprintf("Pattern %s does not match any files", filepath.Join("tests", "*")),
}
require.Equal(t, diags[1].Severity, diag.Warning)
require.Contains(t, summaries, diags[1].Summary)
require.Equal(t, diags[2].Severity, diag.Warning)
require.Contains(t, summaries, diags[2].Summary)
}

View File

@ -16,9 +16,9 @@ func New() *cobra.Command {
authentication for the Databricks CLI and SDKs work please refer to the documentation authentication for the Databricks CLI and SDKs work please refer to the documentation
linked below. linked below.
AWS: https://docs.databricks.com/en/dev-tools/auth/index.html AWS: https://docs.databricks.com/dev-tools/auth/index.html
Azure: https://learn.microsoft.com/en-us/azure/databricks/dev-tools/auth Azure: https://learn.microsoft.com/azure/databricks/dev-tools/auth
GCP: https://docs.gcp.databricks.com/en/dev-tools/auth/index.html`, GCP: https://docs.gcp.databricks.com/dev-tools/auth/index.html`,
} }
var perisistentAuth auth.PersistentAuth var perisistentAuth auth.PersistentAuth

View File

@ -4,6 +4,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"runtime"
"time" "time"
"github.com/databricks/cli/libs/auth" "github.com/databricks/cli/libs/auth"
@ -32,9 +33,53 @@ func configureHost(ctx context.Context, persistentAuth *auth.PersistentAuth, arg
const minimalDbConnectVersion = "13.1" const minimalDbConnectVersion = "13.1"
func newLoginCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { func newLoginCommand(persistentAuth *auth.PersistentAuth) *cobra.Command {
defaultConfigPath := "~/.databrickscfg"
if runtime.GOOS == "windows" {
defaultConfigPath = "%USERPROFILE%\\.databrickscfg"
}
cmd := &cobra.Command{ cmd := &cobra.Command{
Use: "login [HOST]", Use: "login [HOST]",
Short: "Authenticate this machine", Short: "Log into a Databricks workspace or account",
Long: fmt.Sprintf(`Log into a Databricks workspace or account.
This command logs you into the Databricks workspace or account and saves
the authentication configuration in a profile (in %s by default).
This profile can then be used to authenticate other Databricks CLI commands by
specifying the --profile flag. This profile can also be used to authenticate
other Databricks tooling that supports the Databricks Unified Authentication
Specification. This includes the Databricks Go, Python, and Java SDKs. For more information,
you can refer to the documentation linked below.
AWS: https://docs.databricks.com/dev-tools/auth/index.html
Azure: https://learn.microsoft.com/azure/databricks/dev-tools/auth
GCP: https://docs.gcp.databricks.com/dev-tools/auth/index.html
This command requires a Databricks host URL (specified with --host, as a positional
argument, or implicitly inferred from the specified profile name) and a profile name
(specified with --profile). If you don't specify these values, you'll be prompted
for them at runtime.
While this command always logs you into the specified host, the runtime behaviour
depends on the existing profiles you have set in your configuration file
(at %s by default).
1. If a profile with the specified name exists and specifies a host, you'll
be logged into the host specified by the profile. The profile will be updated
to use "databricks-cli" as the auth type if that was not the case before.
2. If a profile with the specified name exists but does not specify a host,
you'll be prompted to specify a host. The profile will be updated to use the
specified host. The auth type will be updated to "databricks-cli" if that was
not the case before.
3. If a profile with the specified name exists and specifies a host, but you
specify a host using --host (or as the [HOST] positional arg), the profile will
be updated to use the newly specified host. The auth type will be updated to
"databricks-cli" if that was not the case before.
4. If a profile with the specified name does not exist, a new profile will be
created with the specified host. The auth type will be set to "databricks-cli".
`, defaultConfigPath, defaultConfigPath),
} }
var loginTimeout time.Duration var loginTimeout time.Duration

View File

@ -29,10 +29,11 @@ func (c *profileMetadata) IsEmpty() bool {
return c.Host == "" && c.AccountID == "" return c.Host == "" && c.AccountID == ""
} }
func (c *profileMetadata) Load(ctx context.Context, skipValidate bool) { func (c *profileMetadata) Load(ctx context.Context, configFilePath string, skipValidate bool) {
cfg := &config.Config{ cfg := &config.Config{
Loaders: []config.Loader{config.ConfigFile}, Loaders: []config.Loader{config.ConfigFile},
Profile: c.Name, ConfigFile: configFilePath,
Profile: c.Name,
} }
_ = cfg.EnsureResolved() _ = cfg.EnsureResolved()
if cfg.IsAws() { if cfg.IsAws() {
@ -117,7 +118,7 @@ func newProfilesCommand() *cobra.Command {
go func() { go func() {
ctx := cmd.Context() ctx := cmd.Context()
t := time.Now() t := time.Now()
profile.Load(ctx, skipValidate) profile.Load(ctx, iniFile.Path(), skipValidate)
log.Debugf(ctx, "Profile %q took %s to load", profile.Name, time.Since(t)) log.Debugf(ctx, "Profile %q took %s to load", profile.Name, time.Since(t))
wg.Done() wg.Done()
}() }()

View File

@ -36,7 +36,7 @@ func TestProfiles(t *testing.T) {
// Load the profile // Load the profile
profile := &profileMetadata{Name: "profile1"} profile := &profileMetadata{Name: "profile1"}
profile.Load(ctx, true) profile.Load(ctx, configFile, true)
// Check the profile // Check the profile
assert.Equal(t, "profile1", profile.Name) assert.Equal(t, "profile1", profile.Name)

View File

@ -18,8 +18,26 @@ import (
func newRunCommand() *cobra.Command { func newRunCommand() *cobra.Command {
cmd := &cobra.Command{ cmd := &cobra.Command{
Use: "run [flags] KEY", Use: "run [flags] KEY",
Short: "Run a resource (e.g. a job or a pipeline)", Short: "Run a job or pipeline update",
Args: root.MaximumNArgs(1), Long: `Run the job or pipeline identified by KEY.
The KEY is the unique identifier of the resource to run. In addition to
customizing the run using any of the available flags, you can also specify
keyword or positional arguments as shown in these examples:
databricks bundle run my_job -- --key1 value1 --key2 value2
Or:
databricks bundle run my_job -- value1 value2 value3
If the specified job uses job parameters or the job has a notebook task with
parameters, the first example applies and flag names are mapped to the
parameter names.
If the specified job does not use job parameters and the job has a Python file
task or a Python wheel task, the second example applies.
`,
} }
var runOptions run.Options var runOptions run.Options
@ -62,7 +80,7 @@ func newRunCommand() *cobra.Command {
args = append(args, id) args = append(args, id)
} }
if len(args) != 1 { if len(args) < 1 {
return fmt.Errorf("expected a KEY of the resource to run") return fmt.Errorf("expected a KEY of the resource to run")
} }
@ -71,6 +89,12 @@ func newRunCommand() *cobra.Command {
return err return err
} }
// Parse additional positional arguments.
err = runner.ParseArgs(args[1:], &runOptions)
if err != nil {
return err
}
runOptions.NoWait = noWait runOptions.NoWait = noWait
if restart { if restart {
s := cmdio.Spinner(ctx) s := cmdio.Spinner(ctx)
@ -107,10 +131,6 @@ func newRunCommand() *cobra.Command {
} }
cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
if len(args) > 0 {
return nil, cobra.ShellCompDirectiveNoFileComp
}
b, diags := root.MustConfigureBundle(cmd) b, diags := root.MustConfigureBundle(cmd)
if err := diags.Error(); err != nil { if err := diags.Error(); err != nil {
cobra.CompErrorln(err.Error()) cobra.CompErrorln(err.Error())
@ -123,7 +143,16 @@ func newRunCommand() *cobra.Command {
return nil, cobra.ShellCompDirectiveNoFileComp return nil, cobra.ShellCompDirectiveNoFileComp
} }
return run.ResourceCompletions(b), cobra.ShellCompDirectiveNoFileComp if len(args) == 0 {
return run.ResourceCompletions(b), cobra.ShellCompDirectiveNoFileComp
} else {
// If we know the resource to run, we can complete additional positional arguments.
runner, err := run.Find(b, args[0])
if err != nil {
return nil, cobra.ShellCompDirectiveError
}
return runner.CompleteArgs(args[1:], toComplete)
}
} }
return cmd return cmd

View File

@ -21,7 +21,7 @@ type syncFlags struct {
} }
func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, b *bundle.Bundle) (*sync.SyncOptions, error) { func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, b *bundle.Bundle) (*sync.SyncOptions, error) {
opts, err := files.GetSyncOptions(cmd.Context(), b) opts, err := files.GetSyncOptions(cmd.Context(), bundle.ReadOnly(b))
if err != nil { if err != nil {
return nil, fmt.Errorf("cannot get sync options: %w", err) return nil, fmt.Errorf("cannot get sync options: %w", err)
} }

View File

@ -8,6 +8,7 @@ import (
"text/template" "text/template"
"github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config/validate"
"github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/bundle/phases"
"github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/cmd/bundle/utils"
"github.com/databricks/cli/cmd/root" "github.com/databricks/cli/cmd/root"
@ -47,7 +48,7 @@ const warningTemplate = `{{ "Warning" | yellow }}: {{ .Summary }}
const summaryTemplate = `Name: {{ .Config.Bundle.Name | bold }} const summaryTemplate = `Name: {{ .Config.Bundle.Name | bold }}
Target: {{ .Config.Bundle.Target | bold }} Target: {{ .Config.Bundle.Target | bold }}
Workspace: Workspace:
Host: {{ .Config.Workspace.Host | bold }} Host: {{ .WorkspaceClient.Config.Host | bold }}
User: {{ .Config.Workspace.CurrentUser.UserName | bold }} User: {{ .Config.Workspace.CurrentUser.UserName | bold }}
Path: {{ .Config.Workspace.RootPath | bold }} Path: {{ .Config.Workspace.RootPath | bold }}
@ -106,8 +107,9 @@ func renderTextOutput(cmd *cobra.Command, b *bundle.Bundle, diags diag.Diagnosti
// Print validation summary. // Print validation summary.
t := template.Must(template.New("summary").Funcs(validateFuncMap).Parse(summaryTemplate)) t := template.Must(template.New("summary").Funcs(validateFuncMap).Parse(summaryTemplate))
err := t.Execute(cmd.OutOrStdout(), map[string]any{ err := t.Execute(cmd.OutOrStdout(), map[string]any{
"Config": b.Config, "Config": b.Config,
"Trailer": buildTrailer(diags), "Trailer": buildTrailer(diags),
"WorkspaceClient": b.WorkspaceClient(),
}) })
if err != nil { if err != nil {
return err return err
@ -140,6 +142,7 @@ func newValidateCommand() *cobra.Command {
} }
diags = diags.Extend(bundle.Apply(ctx, b, phases.Initialize())) diags = diags.Extend(bundle.Apply(ctx, b, phases.Initialize()))
diags = diags.Extend(bundle.Apply(ctx, b, validate.Validate()))
if err := diags.Error(); err != nil { if err := diags.Error(); err != nil {
return err return err
} }

View File

@ -136,6 +136,10 @@ func (i *installer) Upgrade(ctx context.Context) error {
if err != nil { if err != nil {
return fmt.Errorf("installer: %w", err) return fmt.Errorf("installer: %w", err)
} }
err = i.installPythonDependencies(ctx, ".")
if err != nil {
return fmt.Errorf("python dependencies: %w", err)
}
return nil return nil
} }

View File

@ -403,6 +403,12 @@ func TestUpgraderWorksForReleases(t *testing.T) {
newHome := copyTestdata(t, "testdata/installed-in-home") newHome := copyTestdata(t, "testdata/installed-in-home")
ctx = env.WithUserHomeDir(ctx, newHome) ctx = env.WithUserHomeDir(ctx, newHome)
// Install stubs for the Python calls that we need to verify are run during
// the upgrade process.
ctx, stub := process.WithStub(ctx)
stub.WithStderrFor(`python[\S]+ -m pip install .`, "[mock pip install]")
stub.WithStdoutFor(`python[\S]+ install.py`, "setting up important infrastructure")
py, _ := python.DetectExecutable(ctx) py, _ := python.DetectExecutable(ctx)
py, _ = filepath.Abs(py) py, _ = filepath.Abs(py)
ctx = env.Set(ctx, "PYTHON_BIN", py) ctx = env.Set(ctx, "PYTHON_BIN", py)
@ -420,4 +426,17 @@ func TestUpgraderWorksForReleases(t *testing.T) {
r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "upgrade", "blueprint") r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "upgrade", "blueprint")
r.RunAndExpectOutput("setting up important infrastructure") r.RunAndExpectOutput("setting up important infrastructure")
// Check if the stub was called with the 'python -m pip install' command
pi := false
for _, call := range stub.Commands() {
if strings.HasSuffix(call, "-m pip install .") {
pi = true
break
}
}
if !pi {
t.Logf(`Expected stub command 'python[\S]+ -m pip install .' not found`)
t.FailNow()
}
} }

View File

@ -30,7 +30,7 @@ func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, args []string, b *
return nil, fmt.Errorf("SRC and DST are not configurable in the context of a bundle") return nil, fmt.Errorf("SRC and DST are not configurable in the context of a bundle")
} }
opts, err := files.GetSyncOptions(cmd.Context(), b) opts, err := files.GetSyncOptions(cmd.Context(), bundle.ReadOnly(b))
if err != nil { if err != nil {
return nil, fmt.Errorf("cannot get sync options: %w", err) return nil, fmt.Errorf("cannot get sync options: %w", err)
} }

Some files were not shown because too many files have changed in this diff.